Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/fid.c | 17
-rw-r--r--  fs/9p/fid.h | 1
-rw-r--r--  fs/Kconfig | 15
-rw-r--r--  fs/adfs/super.c | 4
-rw-r--r--  fs/affs/super.c | 2
-rw-r--r--  fs/afs/flock.c | 129
-rw-r--r--  fs/afs/mntpt.c | 2
-rw-r--r--  fs/afs/netdevices.c | 5
-rw-r--r--  fs/afs/rxrpc.c | 21
-rw-r--r--  fs/afs/super.c | 3
-rw-r--r--  fs/aio.c | 2
-rw-r--r--  fs/autofs4/root.c | 31
-rw-r--r--  fs/befs/linuxvfs.c | 4
-rw-r--r--  fs/bfs/inode.c | 4
-rw-r--r--  fs/binfmt_elf.c | 181
-rw-r--r--  fs/binfmt_elf_fdpic.c | 64
-rw-r--r--  fs/binfmt_flat.c | 6
-rw-r--r--  fs/binfmt_misc.c | 4
-rw-r--r--  fs/binfmt_script.c | 4
-rw-r--r--  fs/bio.c | 82
-rw-r--r--  fs/block_dev.c | 4
-rw-r--r--  fs/buffer.c | 53
-rw-r--r--  fs/char_dev.c | 3
-rw-r--r--  fs/cifs/CHANGES | 16
-rw-r--r--  fs/cifs/README | 51
-rw-r--r--  fs/cifs/TODO | 15
-rw-r--r--  fs/cifs/asn1.c | 57
-rw-r--r--  fs/cifs/cifs_debug.c | 103
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 2
-rw-r--r--  fs/cifs/cifs_unicode.c | 2
-rw-r--r--  fs/cifs/cifs_unicode.h | 39
-rw-r--r--  fs/cifs/cifs_uniupr.h | 8
-rw-r--r--  fs/cifs/cifsencrypt.c | 262
-rw-r--r--  fs/cifs/cifsfs.c | 209
-rw-r--r--  fs/cifs/cifsfs.h | 23
-rw-r--r--  fs/cifs/cifsglob.h | 78
-rw-r--r--  fs/cifs/cifspdu.h | 297
-rw-r--r--  fs/cifs/cifsproto.h | 123
-rw-r--r--  fs/cifs/cifssmb.c | 1621
-rw-r--r--  fs/cifs/connect.c | 1262
-rw-r--r--  fs/cifs/dir.c | 22
-rw-r--r--  fs/cifs/export.c | 49
-rw-r--r--  fs/cifs/fcntl.c | 2
-rw-r--r--  fs/cifs/file.c | 316
-rw-r--r--  fs/cifs/inode.c | 320
-rw-r--r--  fs/cifs/ioctl.c | 4
-rw-r--r--  fs/cifs/link.c | 116
-rw-r--r--  fs/cifs/md4.c | 10
-rw-r--r--  fs/cifs/md5.c | 8
-rw-r--r--  fs/cifs/misc.c | 239
-rw-r--r--  fs/cifs/netmisc.c | 171
-rw-r--r--  fs/cifs/nterr.c | 8
-rw-r--r--  fs/cifs/nterr.h | 8
-rw-r--r--  fs/cifs/ntlmssp.h | 22
-rw-r--r--  fs/cifs/readdir.c | 375
-rw-r--r--  fs/cifs/sess.c | 253
-rw-r--r--  fs/cifs/smbdes.c | 30
-rw-r--r--  fs/cifs/smbencrypt.c | 52
-rw-r--r--  fs/cifs/smberr.h | 8
-rw-r--r--  fs/cifs/transport.c | 248
-rw-r--r--  fs/cifs/xattr.c | 229
-rw-r--r--  fs/coda/cache.c | 7
-rw-r--r--  fs/coda/cnode.c | 7
-rw-r--r--  fs/coda/coda_int.h | 7
-rw-r--r--  fs/coda/dir.c | 305
-rw-r--r--  fs/coda/file.c | 80
-rw-r--r--  fs/coda/inode.c | 50
-rw-r--r--  fs/coda/psdev.c | 88
-rw-r--r--  fs/coda/symlink.c | 2
-rw-r--r--  fs/coda/sysctl.c | 228
-rw-r--r--  fs/coda/upcall.c | 499
-rw-r--r--  fs/compat.c | 128
-rw-r--r--  fs/compat_ioctl.c | 739
-rw-r--r--  fs/configfs/mount.c | 2
-rw-r--r--  fs/dcache.c | 4
-rw-r--r--  fs/dcookies.c | 3
-rw-r--r--  fs/debugfs/file.c | 36
-rw-r--r--  fs/debugfs/inode.c | 5
-rw-r--r--  fs/direct-io.c | 30
-rw-r--r--  fs/dlm/dlm_internal.h | 1
-rw-r--r--  fs/dlm/lock.c | 211
-rw-r--r--  fs/dlm/lock.h | 3
-rw-r--r--  fs/dlm/lockspace.c | 3
-rw-r--r--  fs/dlm/lowcomms.c | 21
-rw-r--r--  fs/dlm/member.c | 45
-rw-r--r--  fs/dlm/memory.c | 14
-rw-r--r--  fs/dlm/midcomms.c | 17
-rw-r--r--  fs/dlm/rcom.c | 39
-rw-r--r--  fs/dlm/rcom.h | 5
-rw-r--r--  fs/dlm/recoverd.c | 11
-rw-r--r--  fs/dlm/requestqueue.c | 58
-rw-r--r--  fs/dlm/requestqueue.h | 4
-rw-r--r--  fs/dnotify.c | 2
-rw-r--r--  fs/dquot.c | 4
-rw-r--r--  fs/ecryptfs/inode.c | 9
-rw-r--r--  fs/ecryptfs/main.c | 20
-rw-r--r--  fs/ecryptfs/mmap.c | 8
-rw-r--r--  fs/ecryptfs/netlink.c | 16
-rw-r--r--  fs/efs/super.c | 4
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 703
-rw-r--r--  fs/ext2/super.c | 10
-rw-r--r--  fs/ext3/dir.c | 14
-rw-r--r--  fs/ext3/namei.c | 73
-rw-r--r--  fs/ext3/super.c | 15
-rw-r--r--  fs/ext4/balloc.c | 4
-rw-r--r--  fs/ext4/dir.c | 14
-rw-r--r--  fs/ext4/extents.c | 684
-rw-r--r--  fs/ext4/file.c | 1
-rw-r--r--  fs/ext4/ialloc.c | 8
-rw-r--r--  fs/ext4/inode.c | 120
-rw-r--r--  fs/ext4/ioctl.c | 9
-rw-r--r--  fs/ext4/namei.c | 149
-rw-r--r--  fs/ext4/super.c | 65
-rw-r--r--  fs/ext4/xattr.c | 276
-rw-r--r--  fs/ext4/xattr.h | 17
-rw-r--r--  fs/fat/cache.c | 2
-rw-r--r--  fs/fat/inode.c | 2
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/freevxfs/vxfs_super.c | 4
-rw-r--r--  fs/fs-writeback.c | 1
-rw-r--r--  fs/fuse/dev.c | 2
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/gfs2/bmap.c | 35
-rw-r--r--  fs/gfs2/daemon.c | 24
-rw-r--r--  fs/gfs2/daemon.h | 1
-rw-r--r--  fs/gfs2/dir.c | 3
-rw-r--r--  fs/gfs2/eaops.c | 8
-rw-r--r--  fs/gfs2/eaops.h | 4
-rw-r--r--  fs/gfs2/glock.c | 293
-rw-r--r--  fs/gfs2/glock.h | 5
-rw-r--r--  fs/gfs2/glops.c | 24
-rw-r--r--  fs/gfs2/incore.h | 31
-rw-r--r--  fs/gfs2/inode.c | 78
-rw-r--r--  fs/gfs2/inode.h | 3
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h | 1
-rw-r--r--  fs/gfs2/locking/dlm/plock.c | 11
-rw-r--r--  fs/gfs2/locking/dlm/sysfs.c | 2
-rw-r--r--  fs/gfs2/locking/dlm/thread.c | 20
-rw-r--r--  fs/gfs2/locking/nolock/main.c | 1
-rw-r--r--  fs/gfs2/log.c | 230
-rw-r--r--  fs/gfs2/log.h | 2
-rw-r--r--  fs/gfs2/lops.c | 468
-rw-r--r--  fs/gfs2/main.c | 9
-rw-r--r--  fs/gfs2/meta_io.c | 136
-rw-r--r--  fs/gfs2/meta_io.h | 6
-rw-r--r--  fs/gfs2/mount.c | 30
-rw-r--r--  fs/gfs2/ops_address.c | 151
-rw-r--r--  fs/gfs2/ops_export.c | 2
-rw-r--r--  fs/gfs2/ops_file.c | 66
-rw-r--r--  fs/gfs2/ops_fstype.c | 40
-rw-r--r--  fs/gfs2/ops_inode.c | 38
-rw-r--r--  fs/gfs2/ops_super.c | 14
-rw-r--r--  fs/gfs2/ops_vm.c | 64
-rw-r--r--  fs/gfs2/quota.c | 13
-rw-r--r--  fs/gfs2/recovery.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 49
-rw-r--r--  fs/gfs2/super.c | 6
-rw-r--r--  fs/gfs2/sys.c | 4
-rw-r--r--  fs/gfs2/trans.c | 22
-rw-r--r--  fs/gfs2/trans.h | 2
-rw-r--r--  fs/hfs/super.c | 2
-rw-r--r--  fs/hfsplus/super.c | 2
-rw-r--r--  fs/hpfs/super.c | 4
-rw-r--r--  fs/hugetlbfs/inode.c | 17
-rw-r--r--  fs/inode.c | 3
-rw-r--r--  fs/inotify_user.c | 4
-rw-r--r--  fs/isofs/inode.c | 11
-rw-r--r--  fs/jbd/journal.c | 8
-rw-r--r--  fs/jbd/revoke.c | 4
-rw-r--r--  fs/jbd2/journal.c | 87
-rw-r--r--  fs/jbd2/recovery.c | 8
-rw-r--r--  fs/jbd2/revoke.c | 4
-rw-r--r--  fs/jffs2/background.c | 8
-rw-r--r--  fs/jffs2/fs.c | 2
-rw-r--r--  fs/jffs2/malloc.c | 18
-rw-r--r--  fs/jffs2/nodelist.h | 5
-rw-r--r--  fs/jffs2/readinode.c | 4
-rw-r--r--  fs/jffs2/scan.c | 3
-rw-r--r--  fs/jffs2/super.c | 2
-rw-r--r--  fs/jffs2/write.c | 18
-rw-r--r--  fs/jfs/jfs_logmgr.c | 12
-rw-r--r--  fs/jfs/jfs_metapage.c | 17
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/lockd/svclock.c | 37
-rw-r--r--  fs/locks.c | 116
-rw-r--r--  fs/mbcache.c | 2
-rw-r--r--  fs/minix/inode.c | 4
-rw-r--r--  fs/mpage.c | 12
-rw-r--r--  fs/namei.c | 39
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ncpfs/inode.c | 4
-rw-r--r--  fs/ncpfs/mmap.c | 40
-rw-r--r--  fs/ncpfs/ncplib_kernel.c | 3
-rw-r--r--  fs/nfs/callback_xdr.c | 10
-rw-r--r--  fs/nfs/client.c | 29
-rw-r--r--  fs/nfs/delegation.c | 21
-rw-r--r--  fs/nfs/dir.c | 10
-rw-r--r--  fs/nfs/direct.c | 2
-rw-r--r--  fs/nfs/file.c | 18
-rw-r--r--  fs/nfs/getroot.c | 3
-rw-r--r--  fs/nfs/inode.c | 28
-rw-r--r--  fs/nfs/namespace.c | 4
-rw-r--r--  fs/nfs/nfs2xdr.c | 19
-rw-r--r--  fs/nfs/nfs3proc.c | 60
-rw-r--r--  fs/nfs/nfs3xdr.c | 24
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 142
-rw-r--r--  fs/nfs/nfs4renewd.c | 5
-rw-r--r--  fs/nfs/nfs4state.c | 5
-rw-r--r--  fs/nfs/nfs4xdr.c | 274
-rw-r--r--  fs/nfs/pagelist.c | 2
-rw-r--r--  fs/nfs/proc.c | 40
-rw-r--r--  fs/nfs/read.c | 2
-rw-r--r--  fs/nfs/super.c | 141
-rw-r--r--  fs/nfs/unlink.c | 195
-rw-r--r--  fs/nfs/write.c | 46
-rw-r--r--  fs/nfsctl.c | 16
-rw-r--r--  fs/nfsd/auth.c | 3
-rw-r--r--  fs/nfsd/export.c | 17
-rw-r--r--  fs/nfsd/nfs4idmap.c | 6
-rw-r--r--  fs/nfsd/nfs4proc.c | 10
-rw-r--r--  fs/nfsd/nfs4state.c | 18
-rw-r--r--  fs/nfsd/nfs4xdr.c | 2
-rw-r--r--  fs/nfsd/nfsfh.c | 20
-rw-r--r--  fs/nfsd/vfs.c | 21
-rw-r--r--  fs/ntfs/ChangeLog | 12
-rw-r--r--  fs/ntfs/Makefile | 2
-rw-r--r--  fs/ntfs/aops.c | 22
-rw-r--r--  fs/ntfs/attrib.c | 8
-rw-r--r--  fs/ntfs/file.c | 36
-rw-r--r--  fs/ntfs/inode.c | 3
-rw-r--r--  fs/ntfs/logfile.c | 143
-rw-r--r--  fs/ntfs/runlist.c | 4
-rw-r--r--  fs/ntfs/super.c | 10
-rw-r--r--  fs/ocfs2/alloc.c | 487
-rw-r--r--  fs/ocfs2/alloc.h | 7
-rw-r--r--  fs/ocfs2/aops.c | 348
-rw-r--r--  fs/ocfs2/aops.h | 6
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 7
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 3
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 24
-rw-r--r--  fs/ocfs2/dir.c | 1423
-rw-r--r--  fs/ocfs2/dir.h | 48
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.h | 4
-rw-r--r--  fs/ocfs2/export.c | 8
-rw-r--r--  fs/ocfs2/extent_map.c | 6
-rw-r--r--  fs/ocfs2/file.c | 410
-rw-r--r--  fs/ocfs2/file.h | 2
-rw-r--r--  fs/ocfs2/heartbeat.c | 2
-rw-r--r--  fs/ocfs2/inode.c | 7
-rw-r--r--  fs/ocfs2/inode.h | 1
-rw-r--r--  fs/ocfs2/journal.c | 120
-rw-r--r--  fs/ocfs2/journal.h | 3
-rw-r--r--  fs/ocfs2/localalloc.c | 8
-rw-r--r--  fs/ocfs2/localalloc.h | 2
-rw-r--r--  fs/ocfs2/mmap.c | 34
-rw-r--r--  fs/ocfs2/namei.c | 568
-rw-r--r--  fs/ocfs2/namei.h | 19
-rw-r--r--  fs/ocfs2/ocfs2.h | 15
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 64
-rw-r--r--  fs/ocfs2/suballoc.c | 29
-rw-r--r--  fs/ocfs2/suballoc.h | 11
-rw-r--r--  fs/ocfs2/super.c | 202
-rw-r--r--  fs/ocfs2/super.h | 2
-rw-r--r--  fs/ocfs2/sysfile.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 2
-rw-r--r--  fs/ocfs2/vote.c | 4
-rw-r--r--  fs/open.c | 75
-rw-r--r--  fs/openpromfs/inode.c | 2
-rw-r--r--  fs/partitions/check.c | 15
-rw-r--r--  fs/partitions/msdos.c | 5
-rw-r--r--  fs/partitions/sun.c | 62
-rw-r--r--  fs/partitions/sun.h | 1
-rw-r--r--  fs/pipe.c | 2
-rw-r--r--  fs/proc/Makefile | 1
-rw-r--r--  fs/proc/array.c | 44
-rw-r--r--  fs/proc/base.c | 93
-rw-r--r--  fs/proc/inode.c | 31
-rw-r--r--  fs/proc/internal.h | 5
-rw-r--r--  fs/proc/proc_misc.c | 18
-rw-r--r--  fs/proc/proc_net.c | 200
-rw-r--r--  fs/proc/root.c | 8
-rw-r--r--  fs/qnx4/inode.c | 2
-rw-r--r--  fs/quota.c | 2
-rw-r--r--  fs/ramfs/file-nommu.c | 7
-rw-r--r--  fs/reiserfs/stree.c | 5
-rw-r--r--  fs/reiserfs/super.c | 15
-rw-r--r--  fs/romfs/inode.c | 4
-rw-r--r--  fs/select.c | 2
-rw-r--r--  fs/seq_file.c | 33
-rw-r--r--  fs/signalfd.c | 186
-rw-r--r--  fs/smbfs/inode.c | 4
-rw-r--r--  fs/smbfs/request.c | 2
-rw-r--r--  fs/splice.c | 88
-rw-r--r--  fs/sysfs/bin.c | 43
-rw-r--r--  fs/sysfs/dir.c | 782
-rw-r--r--  fs/sysfs/file.c | 249
-rw-r--r--  fs/sysfs/group.c | 2
-rw-r--r--  fs/sysfs/inode.c | 105
-rw-r--r--  fs/sysfs/mount.c | 34
-rw-r--r--  fs/sysfs/symlink.c | 38
-rw-r--r--  fs/sysfs/sysfs.h | 185
-rw-r--r--  fs/sysv/inode.c | 2
-rw-r--r--  fs/timerfd.c | 6
-rw-r--r--  fs/udf/balloc.c | 553
-rw-r--r--  fs/udf/crc.c | 15
-rw-r--r--  fs/udf/dir.c | 108
-rw-r--r--  fs/udf/directory.c | 173
-rw-r--r--  fs/udf/ecma_167.h | 191
-rw-r--r--  fs/udf/file.c | 102
-rw-r--r--  fs/udf/fsync.c | 6
-rw-r--r--  fs/udf/ialloc.c | 37
-rw-r--r--  fs/udf/inode.c | 1212
-rw-r--r--  fs/udf/lowlevel.c | 21
-rw-r--r--  fs/udf/misc.c | 113
-rw-r--r--  fs/udf/namei.c | 564
-rw-r--r--  fs/udf/osta_udf.h | 75
-rw-r--r--  fs/udf/partition.c | 119
-rw-r--r--  fs/udf/super.c | 1282
-rw-r--r--  fs/udf/symlink.c | 54
-rw-r--r--  fs/udf/truncate.c | 166
-rw-r--r--  fs/udf/udf_sb.h | 22
-rw-r--r--  fs/udf/udfdecl.h | 102
-rw-r--r--  fs/udf/udfend.h | 18
-rw-r--r--  fs/udf/udftime.c | 87
-rw-r--r--  fs/udf/unicode.c | 258
-rw-r--r--  fs/ufs/super.c | 8
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 16
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 40
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 4
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 3
-rw-r--r--  fs/xfs/support/debug.h | 10
-rw-r--r--  fs/xfs/xfs_da_btree.c | 1
-rw-r--r--  fs/xfs/xfs_filestream.c | 10
-rw-r--r--  fs/xfs/xfs_log.c | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 12
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 72
-rw-r--r--  fs/xfs/xfs_mru_cache.h | 6
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 50
347 files changed, 14960 insertions, 12863 deletions
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 08fa320..15e05a1 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -92,23 +92,6 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
return fid;
}
-struct p9_fid *v9fs_fid_lookup_remove(struct dentry *dentry)
-{
- struct p9_fid *fid;
- struct v9fs_dentry *dent;
-
- dent = dentry->d_fsdata;
- fid = v9fs_fid_lookup(dentry);
- if (!IS_ERR(fid)) {
- spin_lock(&dent->lock);
- list_del(&fid->dlist);
- spin_unlock(&dent->lock);
- }
-
- return fid;
-}
-
-
/**
* v9fs_fid_clone - lookup the fid for a dentry, clone a private copy and
* release it
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 47a0ba7..26e07df 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -28,6 +28,5 @@ struct v9fs_dentry {
};
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
-struct p9_fid *v9fs_fid_lookup_remove(struct dentry *dentry);
struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
diff --git a/fs/Kconfig b/fs/Kconfig
index 613df55..f9eed6d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -251,7 +251,7 @@ config JBD2
config JBD2_DEBUG
bool "JBD2 (ext4dev/ext4) debugging support"
- depends on JBD2
+ depends on JBD2 && DEBUG_FS
help
If you are using the ext4dev/ext4 journaled file system (or
potentially any other filesystem/device using JBD2), this option
@@ -260,10 +260,10 @@ config JBD2_DEBUG
By default, the debugging output will be turned off.
If you select Y here, then you will be able to turn on debugging
- with "echo N > /proc/sys/fs/jbd2-debug", where N is a number between
- 1 and 5. The higher the number, the more debugging output is
- generated. To turn debugging off again, do
- "echo 0 > /proc/sys/fs/jbd2-debug".
+ with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+ number between 1 and 5. The higher the number, the more debugging
+ output is generated. To turn debugging off again, do
+ "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
config FS_MBCACHE
# Meta block cache for Extended Attributes (ext2/ext3/ext4)
@@ -441,9 +441,6 @@ config OCFS2_FS
Note: Features which OCFS2 does not support yet:
- extended attributes
- - shared writeable mmap
- - loopback is supported, but data written will not
- be cluster coherent.
- quotas
- cluster aware flock
- Directory change notification (F_NOTIFY)
@@ -1674,7 +1671,7 @@ config NFSD_V3_ACL
config NFSD_V4
bool "Provide NFSv4 server support (EXPERIMENTAL)"
- depends on NFSD_V3 && EXPERIMENTAL
+ depends on NFSD && NFSD_V3 && EXPERIMENTAL
select RPCSEC_GSS_KRB5
help
If you would like to include the NFSv4 server as well as the NFSv2
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index de2ed5c..1c9fd30 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -234,14 +234,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
sizeof(struct adfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (adfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 6d0ebc3..c80191a 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -99,7 +99,7 @@ static int init_inodecache(void)
sizeof(struct affs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (affs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 8f07f8d1..af6952e 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -19,6 +19,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);
static struct workqueue_struct *afs_lock_manager;
+static DEFINE_MUTEX(afs_lock_manager_mutex);
static struct file_lock_operations afs_lock_ops = {
.fl_copy_lock = afs_fl_copy_lock,
@@ -30,12 +31,20 @@ static struct file_lock_operations afs_lock_ops = {
*/
static int afs_init_lock_manager(void)
{
+ int ret;
+
+ ret = 0;
if (!afs_lock_manager) {
- afs_lock_manager = create_singlethread_workqueue("kafs_lockd");
- if (!afs_lock_manager)
- return -ENOMEM;
+ mutex_lock(&afs_lock_manager_mutex);
+ if (!afs_lock_manager) {
+ afs_lock_manager =
+ create_singlethread_workqueue("kafs_lockd");
+ if (!afs_lock_manager)
+ ret = -ENOMEM;
+ }
+ mutex_unlock(&afs_lock_manager_mutex);
}
- return 0;
+ return ret;
}
/*
@@ -68,6 +77,29 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
}
/*
+ * grant one or more locks (readlocks are allowed to jump the queue if the
+ * first lock in the queue is itself a readlock)
+ * - the caller must hold the vnode lock
+ */
+static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
+{
+ struct file_lock *p, *_p;
+
+ list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+ if (fl->fl_type == F_RDLCK) {
+ list_for_each_entry_safe(p, _p, &vnode->pending_locks,
+ fl_u.afs.link) {
+ if (p->fl_type == F_RDLCK) {
+ p->fl_u.afs.state = AFS_LOCK_GRANTED;
+ list_move_tail(&p->fl_u.afs.link,
+ &vnode->granted_locks);
+ wake_up(&p->fl_wait);
+ }
+ }
+ }
+}
+
+/*
* do work for a lock, including:
* - probing for a lock we're waiting on but didn't get immediately
* - extending a lock that's close to timing out
@@ -172,8 +204,7 @@ void afs_lock_work(struct work_struct *work)
struct file_lock, fl_u.afs.link) == fl) {
fl->fl_u.afs.state = ret;
if (ret == AFS_LOCK_GRANTED)
- list_move_tail(&fl->fl_u.afs.link,
- &vnode->granted_locks);
+ afs_grant_locks(vnode, fl);
else
list_del_init(&fl->fl_u.afs.link);
wake_up(&fl->fl_wait);
@@ -258,49 +289,50 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
spin_lock(&vnode->lock);
- if (list_empty(&vnode->pending_locks)) {
- /* if there's no-one else with a lock on this vnode, then we
- * need to ask the server for a lock */
- if (list_empty(&vnode->granted_locks)) {
- _debug("not locked");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- set_bit(AFS_VNODE_LOCKING, &vnode->flags);
- spin_unlock(&vnode->lock);
+ /* if we've already got a readlock on the server then we can instantly
+ * grant another readlock, irrespective of whether there are any
+ * pending writelocks */
+ if (type == AFS_LOCK_READ &&
+ vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+ _debug("instant readlock");
+ ASSERTCMP(vnode->flags &
+ ((1 << AFS_VNODE_LOCKING) |
+ (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+ ASSERT(!list_empty(&vnode->granted_locks));
+ goto sharing_existing_lock;
+ }
- ret = afs_vnode_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
- switch (ret) {
- case 0:
- goto acquired_server_lock;
- case -EWOULDBLOCK:
- spin_lock(&vnode->lock);
- ASSERT(list_empty(&vnode->granted_locks));
- ASSERTCMP(vnode->pending_locks.next, ==,
- &fl->fl_u.afs.link);
- goto wait;
- default:
- spin_lock(&vnode->lock);
- list_del_init(&fl->fl_u.afs.link);
- spin_unlock(&vnode->lock);
- goto error;
- }
- }
+ /* if there's no-one else with a lock on this vnode, then we need to
+ * ask the server for a lock */
+ if (list_empty(&vnode->pending_locks) &&
+ list_empty(&vnode->granted_locks)) {
+ _debug("not locked");
+ ASSERTCMP(vnode->flags &
+ ((1 << AFS_VNODE_LOCKING) |
+ (1 << AFS_VNODE_READLOCKED) |
+ (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+ list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
+ set_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ spin_unlock(&vnode->lock);
- /* if we've already got a readlock on the server and no waiting
- * writelocks, then we might be able to instantly grant another
- * readlock */
- if (type == AFS_LOCK_READ &&
- vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
- _debug("instant readlock");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- ASSERT(!list_empty(&vnode->granted_locks));
- goto sharing_existing_lock;
+ ret = afs_vnode_set_lock(vnode, key, type);
+ clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ switch (ret) {
+ case 0:
+ _debug("acquired");
+ goto acquired_server_lock;
+ case -EWOULDBLOCK:
+ _debug("would block");
+ spin_lock(&vnode->lock);
+ ASSERT(list_empty(&vnode->granted_locks));
+ ASSERTCMP(vnode->pending_locks.next, ==,
+ &fl->fl_u.afs.link);
+ goto wait;
+ default:
+ spin_lock(&vnode->lock);
+ list_del_init(&fl->fl_u.afs.link);
+ spin_unlock(&vnode->lock);
+ goto error;
}
}
@@ -456,7 +488,8 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
/* check local lock records first */
ret = 0;
- if (posix_test_lock(file, fl) == 0) {
+ posix_test_lock(file, fl);
+ if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
ret = afs_vnode_fetch_status(vnode, NULL, key);
if (ret < 0)
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a3684dc..6f8c96f 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
switch (err) {
case 0:
- mntput(nd->mnt);
dput(nd->dentry);
+ mntput(nd->mnt);
nd->mnt = newmnt;
nd->dentry = dget(newmnt->mnt_root);
schedule_delayed_work(&afs_mntpt_expiry_timer,
diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c
index fc27d4b..49f1894 100644
--- a/fs/afs/netdevices.c
+++ b/fs/afs/netdevices.c
@@ -8,6 +8,7 @@
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
+#include <net/net_namespace.h>
#include "internal.h"
/*
@@ -23,7 +24,7 @@ int afs_get_MAC_address(u8 *mac, size_t maclen)
BUG();
rtnl_lock();
- dev = __dev_getfirstbyhwtype(ARPHRD_ETHER);
+ dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER);
if (dev) {
memcpy(mac, dev->dev_addr, maclen);
ret = 0;
@@ -47,7 +48,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs,
ASSERT(maxbufs > 0);
rtnl_lock();
- for_each_netdev(dev) {
+ for_each_netdev(&init_net, dev) {
if (dev->type == ARPHRD_LOOPBACK && !wantloopback)
continue;
idev = __in_dev_get_rtnl(dev);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 1b36f45..8ccee9e 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -792,6 +792,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
struct msghdr msg;
struct iovec iov[1];
+ int n;
_enter("");
@@ -806,22 +807,20 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
- case 0:
+ n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
+ if (n >= 0) {
_leave(" [replied]");
return;
-
- case -ENOMEM:
+ }
+ if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
- default:
- rxrpc_kernel_end_call(call->rxcall);
- call->rxcall = NULL;
- call->type->destructor(call);
- afs_free_call(call);
- _leave(" [error]");
- return;
}
+ rxrpc_kernel_end_call(call->rxcall);
+ call->rxcall = NULL;
+ call->type->destructor(call);
+ afs_free_call(call);
+ _leave(" [error]");
}
/*
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 993cdf1..b8808b4 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -89,8 +89,7 @@ int __init afs_fs_init(void)
sizeof(struct afs_vnode),
0,
SLAB_HWCACHE_ALIGN,
- afs_i_init_once,
- NULL);
+ afs_i_init_once);
if (!afs_inode_cachep) {
printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
return ret;
diff --git a/fs/aio.c b/fs/aio.c
index dbe699e..ea2e198 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1562,6 +1562,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
fput(file);
return -EAGAIN;
}
+ req->ki_filp = file;
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -1576,7 +1577,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
}
}
- req->ki_filp = file;
ret = put_user(req->ki_key, &user_iocb->aio_key);
if (unlikely(ret)) {
dprintk("EFAULT: aio_key\n");
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2d4c8a3..45ff3d6 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -587,19 +587,20 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
unhashed = autofs4_lookup_unhashed(sbi, dentry->d_parent, &dentry->d_name);
if (!unhashed) {
/*
- * Mark the dentry incomplete, but add it. This is needed so
- * that the VFS layer knows about the dentry, and we can count
- * on catching any lookups through the revalidate.
- *
- * Let all the hard work be done by the revalidate function that
- * needs to be able to do this anyway..
- *
- * We need to do this before we release the directory semaphore.
+ * Mark the dentry incomplete but don't hash it. We do this
+ * to serialize our inode creation operations (symlink and
+ * mkdir) which prevents deadlock during the callback to
+ * the daemon. Subsequent user space lookups for the same
+ * dentry are placed on the wait queue while the daemon
+ * itself is allowed passage unresticted so the create
+ * operation itself can then hash the dentry. Finally,
+ * we check for the hashed dentry and return the newly
+ * hashed dentry.
*/
dentry->d_op = &autofs4_root_dentry_operations;
dentry->d_fsdata = NULL;
- d_add(dentry, NULL);
+ d_instantiate(dentry, NULL);
} else {
struct autofs_info *ino = autofs4_dentry_ino(unhashed);
DPRINTK("rehash %p with %p", dentry, unhashed);
@@ -607,15 +608,17 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
* If we are racing with expire the request might not
* be quite complete but the directory has been removed
* so it must have been successful, so just wait for it.
+ * We need to ensure the AUTOFS_INF_EXPIRING flag is clear
+ * before continuing as revalidate may fail when calling
+ * try_to_fill_dentry (returning EAGAIN) if we don't.
*/
- if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
+ while (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
DPRINTK("wait for incomplete expire %p name=%.*s",
unhashed, unhashed->d_name.len,
unhashed->d_name.name);
autofs4_wait(sbi, unhashed, NFY_NONE);
DPRINTK("request completed");
}
- d_rehash(unhashed);
dentry = unhashed;
}
@@ -658,7 +661,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
* for all system calls, but it should be OK for the operations
* we permit from an autofs.
*/
- if (dentry->d_inode && d_unhashed(dentry)) {
+ if (!oz_mode && d_unhashed(dentry)) {
/*
* A user space application can (and has done in the past)
* remove and re-create this directory during the callback.
@@ -716,7 +719,7 @@ static int autofs4_dir_symlink(struct inode *dir,
strcpy(cp, symname);
inode = autofs4_get_inode(dir->i_sb, ino);
- d_instantiate(dentry, inode);
+ d_add(dentry, inode);
if (dir == dir->i_sb->s_root->d_inode)
dentry->d_op = &autofs4_root_dentry_operations;
@@ -844,7 +847,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return -ENOSPC;
inode = autofs4_get_inode(dir->i_sb, ino);
- d_instantiate(dentry, inode);
+ d_add(dentry, inode);
if (dir == dir->i_sb->s_root->d_inode)
dentry->d_op = &autofs4_root_dentry_operations;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index a5c5171..a451418 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -414,7 +414,7 @@ befs_read_inode(struct inode *inode)
}
/* Initialize the inode cache. Called at fs setup.
- *
+ *
* Taken from NFS implementation by Al Viro.
*/
static int
@@ -424,7 +424,7 @@ befs_init_inodecache(void)
sizeof (struct befs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (befs_inode_cachep == NULL) {
printk(KERN_ERR "befs_init_inodecache: "
"Couldn't initalize inode slabcache\n");
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 58c7bd9..f346eb1 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -250,14 +250,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&bi->vfs_inode);
}
-
+
static int init_inodecache(void)
{
bfs_inode_cachep = kmem_cache_create("bfs_inode_cache",
sizeof(struct bfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (bfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index a27e42b..b1013f3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,7 @@
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
+static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
/*
* If we don't support core dumping, then supply a NULL so we
@@ -80,7 +80,7 @@ static struct linux_binfmt elf_format = {
.hasvdso = 1
};
-#define BAD_ADDR(x) IS_ERR_VALUE(x)
+#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
@@ -148,6 +148,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
elf_addr_t *elf_info;
int ei_index = 0;
struct task_struct *tsk = current;
+ struct vm_area_struct *vma;
/*
* If this architecture has a platform capability string, copy it
@@ -234,6 +235,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
sp = (elf_addr_t __user *)bprm->p;
#endif
+
+ /*
+ * Grow the stack manually; some architectures have a limit on how
+ * far ahead a user-space access may be in order to grow the stack.
+ */
+ vma = find_extend_vma(current->mm, bprm->p);
+ if (!vma)
+ return -EFAULT;
+
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
if (__put_user(argc, sp++))
return -EFAULT;
@@ -254,8 +264,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
size_t len;
if (__put_user((elf_addr_t)p, argv++))
return -EFAULT;
- len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+ len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return 0;
p += len;
}
@@ -266,8 +276,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
size_t len;
if (__put_user((elf_addr_t)p, envp++))
return -EFAULT;
- len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+ len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return 0;
p += len;
}
@@ -285,70 +295,33 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
#ifndef elf_map
static unsigned long elf_map(struct file *filep, unsigned long addr,
- struct elf_phdr *eppnt, int prot, int type,
- unsigned long total_size)
+ struct elf_phdr *eppnt, int prot, int type)
{
unsigned long map_addr;
- unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
- unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
- addr = ELF_PAGESTART(addr);
- size = ELF_PAGEALIGN(size);
+ unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
+ down_write(&current->mm->mmap_sem);
/* mmap() will return -EINVAL if given a zero size, but a
* segment with zero filesize is perfectly valid */
- if (!size)
- return addr;
-
- down_write(&current->mm->mmap_sem);
- /*
- * total_size is the size of the ELF (interpreter) image.
- * The _first_ mmap needs to know the full size, otherwise
- * randomization might put this image into an overlapping
- * position with the ELF binary image. (since size < total_size)
- * So we first map the 'big' image - and unmap the remainder at
- * the end. (which unmap is needed for ELF images with holes.)
- */
- if (total_size) {
- total_size = ELF_PAGEALIGN(total_size);
- map_addr = do_mmap(filep, addr, total_size, prot, type, off);
- if (!BAD_ADDR(map_addr))
- do_munmap(current->mm, map_addr+size, total_size-size);
- } else
- map_addr = do_mmap(filep, addr, size, prot, type, off);
-
+ if (eppnt->p_filesz + pageoffset)
+ map_addr = do_mmap(filep, ELF_PAGESTART(addr),
+ eppnt->p_filesz + pageoffset, prot, type,
+ eppnt->p_offset - pageoffset);
+ else
+ map_addr = ELF_PAGESTART(addr);
up_write(&current->mm->mmap_sem);
return(map_addr);
}
#endif /* !elf_map */
-static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
-{
- int i, first_idx = -1, last_idx = -1;
-
- for (i = 0; i < nr; i++) {
- if (cmds[i].p_type == PT_LOAD) {
- last_idx = i;
- if (first_idx == -1)
- first_idx = i;
- }
- }
- if (first_idx == -1)
- return 0;
-
- return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
- ELF_PAGESTART(cmds[first_idx].p_vaddr);
-}
-
-
/* This is much more generalized than the library routine read function,
so we keep this separate. Technically the library read function
is only provided so that we can read a.out libraries that have
an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- struct file *interpreter, unsigned long *interp_map_addr,
- unsigned long no_base)
+ struct file *interpreter, unsigned long *interp_load_addr)
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
@@ -356,7 +329,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
int load_addr_set = 0;
unsigned long last_bss = 0, elf_bss = 0;
unsigned long error = ~0UL;
- unsigned long total_size;
int retval, i, size;
/* First of all, some simple consistency checks */
@@ -395,12 +367,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
goto out_close;
}
- total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
- if (!total_size) {
- error = -EINVAL;
- goto out_close;
- }
-
eppnt = elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
@@ -418,14 +384,9 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
vaddr = eppnt->p_vaddr;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
elf_type |= MAP_FIXED;
- else if (no_base && interp_elf_ex->e_type == ET_DYN)
- load_addr = -vaddr;
map_addr = elf_map(interpreter, load_addr + vaddr,
- eppnt, elf_prot, elf_type, total_size);
- total_size = 0;
- if (!*interp_map_addr)
- *interp_map_addr = map_addr;
+ eppnt, elf_prot, elf_type);
error = map_addr;
if (BAD_ADDR(map_addr))
goto out_close;
@@ -491,7 +452,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
goto out_close;
}
- error = load_addr;
+ *interp_load_addr = load_addr;
+ error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
out_close:
kfree(elf_phdata);
@@ -588,8 +550,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
int elf_exec_fileno;
int retval, i;
unsigned int size;
- unsigned long elf_entry;
- unsigned long interp_load_addr = 0;
+ unsigned long elf_entry, interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
char passed_fileno[6];
@@ -826,10 +787,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
}
/* OK, This is the point of no return */
- current->mm->start_data = 0;
- current->mm->end_data = 0;
- current->mm->end_code = 0;
- current->mm->mmap = NULL;
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
@@ -857,7 +814,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack = bprm->p;
/* Now we do a little grungy work by mmaping the ELF image into
- the correct location in memory. */
+ the correct location in memory. At this point, we assume that
+ the image should be loaded at fixed address, not at a variable
+ address. */
for(i = 0, elf_ppnt = elf_phdata;
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
@@ -911,15 +870,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
-#ifdef CONFIG_X86
- load_bias = 0;
-#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-#endif
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
- elf_prot, elf_flags,0);
+ elf_prot, elf_flags);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
retval = IS_ERR((void *)error) ?
@@ -995,25 +950,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
}
if (elf_interpreter) {
- if (interpreter_type == INTERPRETER_AOUT) {
+ if (interpreter_type == INTERPRETER_AOUT)
elf_entry = load_aout_interp(&loc->interp_ex,
interpreter);
- } else {
- unsigned long uninitialized_var(interp_map_addr);
-
+ else
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
- &interp_map_addr,
- load_bias);
- if (!BAD_ADDR(elf_entry)) {
- /*
- * load_elf_interp() returns relocation
- * adjustment
- */
- interp_load_addr = elf_entry;
- elf_entry += loc->interp_elf_ex.e_entry;
- }
- }
+ &interp_load_addr);
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = IS_ERR((void *)elf_entry) ?
@@ -1051,9 +994,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
- create_elf_tables(bprm, &loc->elf_ex,
+ retval = create_elf_tables(bprm, &loc->elf_ex,
(interpreter_type == INTERPRETER_AOUT),
load_addr, interp_load_addr);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+ goto out;
+ }
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->arg_start += strlen(passed_fileno) + 1;
@@ -1252,7 +1199,7 @@ static int dump_seek(struct file *file, loff_t off)
*
* I think we should skip something. But I am not sure how. H.J.
*/
-static int maydump(struct vm_area_struct *vma)
+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
{
/* The vma can be set up to tell us the answer directly. */
if (vma->vm_flags & VM_ALWAYSDUMP)
@@ -1262,15 +1209,19 @@ static int maydump(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
- /* Dump shared memory only if mapped from an anonymous file. */
- if (vma->vm_flags & VM_SHARED)
- return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;
+ /* By default, dump shared memory if mapped from an anonymous file. */
+ if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0)
+ return test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
+ else
+ return test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
+ }
- /* If it hasn't been written to, don't write it out */
+ /* By default, if it hasn't been written to, don't write it out. */
if (!vma->anon_vma)
- return 0;
+ return test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
- return 1;
+ return test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
}
/* An ELF note in memory */
@@ -1562,9 +1513,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
#endif
int thread_status_size = 0;
elf_addr_t *auxv;
-#ifdef ELF_CORE_WRITE_EXTRA_NOTES
- int extra_notes_size;
-#endif
+ unsigned long mm_flags;
/*
* We no longer stop all VM operations.
@@ -1693,10 +1642,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
sz += thread_status_size;
-#ifdef ELF_CORE_WRITE_EXTRA_NOTES
- extra_notes_size = ELF_CORE_EXTRA_NOTES_SIZE;
- sz += extra_notes_size;
-#endif
+ sz += elf_coredump_extra_notes_size();
fill_elf_note_phdr(&phdr, sz, offset);
offset += sz;
@@ -1705,6 +1651,13 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ /*
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency between the program headers and bodies, otherwise an
+ * unusable core file can be generated.
+ */
+ mm_flags = current->mm->flags;
+
/* Write program headers for segments dump */
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
@@ -1717,7 +1670,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
- phdr.p_filesz = maydump(vma) ? sz : 0;
+ phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -1739,10 +1692,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
if (!writenote(notes + i, file, &foffset))
goto end_coredump;
-#ifdef ELF_CORE_WRITE_EXTRA_NOTES
- ELF_CORE_WRITE_EXTRA_NOTES;
- foffset += extra_notes_size;
-#endif
+ if (elf_coredump_extra_notes_write(file, &foffset))
+ goto end_coredump;
/* write out the thread status notes section */
list_for_each(t, &thread_list) {
@@ -1761,7 +1712,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
vma = next_vma(vma, gate_vma)) {
unsigned long addr;
- if (!maydump(vma))
+ if (!maydump(vma, mm_flags))
continue;
for (addr = vma->vm_start;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 9d62fbad..2f5d8db 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -621,8 +621,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
p = (char __user *) current->mm->arg_start;
for (loop = bprm->argc; loop > 0; loop--) {
__put_user((elf_caddr_t) p, argv++);
- len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
@@ -633,8 +633,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
current->mm->env_start = (unsigned long) p;
for (loop = bprm->envc; loop > 0; loop--) {
__put_user((elf_caddr_t)(unsigned long) p, envp++);
- len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE * MAX_ARG_PAGES)
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
@@ -1181,8 +1181,10 @@ static int dump_seek(struct file *file, loff_t off)
*
* I think we should skip something. But I am not sure how. H.J.
*/
-static int maydump(struct vm_area_struct *vma)
+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
{
+ int dump_ok;
+
/* Do not dump I/O mapped devices or special mappings */
if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
@@ -1197,27 +1199,35 @@ static int maydump(struct vm_area_struct *vma)
return 0;
}
- /* Dump shared memory only if mapped from an anonymous file. */
+ /* By default, dump shared memory if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED) {
if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) {
- kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags);
- return 1;
+ dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
+ kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
+ vma->vm_flags, dump_ok ? "yes" : "no");
+ return dump_ok;
}
- kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags);
- return 0;
+ dump_ok = test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
+ kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
+ vma->vm_flags, dump_ok ? "yes" : "no");
+ return dump_ok;
}
#ifdef CONFIG_MMU
- /* If it hasn't been written to, don't write it out */
+ /* By default, if it hasn't been written to, don't write it out */
if (!vma->anon_vma) {
- kdcore("%08lx: %08lx: no (!anon)", vma->vm_start, vma->vm_flags);
- return 0;
+ dump_ok = test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
+ kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start,
+ vma->vm_flags, dump_ok ? "yes" : "no");
+ return dump_ok;
}
#endif
- kdcore("%08lx: %08lx: yes", vma->vm_start, vma->vm_flags);
- return 1;
+ dump_ok = test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
+ kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags,
+ dump_ok ? "yes" : "no");
+ return dump_ok;
}
/* An ELF note in memory */
@@ -1456,15 +1466,15 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
* dump the segments for an MMU process
*/
#ifdef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
- size_t *size, unsigned long *limit)
+static int elf_fdpic_dump_segments(struct file *file, size_t *size,
+ unsigned long *limit, unsigned long mm_flags)
{
struct vm_area_struct *vma;
for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
unsigned long addr;
- if (!maydump(vma))
+ if (!maydump(vma, mm_flags))
continue;
for (addr = vma->vm_start;
@@ -1511,15 +1521,15 @@ end_coredump:
* dump the segments for a NOMMU process
*/
#ifndef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
- size_t *size, unsigned long *limit)
+static int elf_fdpic_dump_segments(struct file *file, size_t *size,
+ unsigned long *limit, unsigned long mm_flags)
{
struct vm_list_struct *vml;
for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
struct vm_area_struct *vma = vml->vma;
- if (!maydump(vma))
+ if (!maydump(vma, mm_flags))
continue;
if ((*size += PAGE_SIZE) > *limit)
@@ -1570,6 +1580,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
struct vm_list_struct *vml;
#endif
elf_addr_t *auxv;
+ unsigned long mm_flags;
/*
* We no longer stop all VM operations.
@@ -1707,6 +1718,13 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
/* Page-align dumped data */
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ /*
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency between the program headers and bodies, otherwise an
+ * unusable core file can be generated.
+ */
+ mm_flags = current->mm->flags;
+
/* write program headers for segments dump */
for (
#ifdef CONFIG_MMU
@@ -1728,7 +1746,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
- phdr.p_filesz = maydump(vma) ? sz : 0;
+ phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -1762,7 +1780,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
DUMP_SEEK(dataoff);
- if (elf_fdpic_dump_segments(file, current->mm, &size, &limit) < 0)
+ if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
goto end_coredump;
#ifdef ELF_CORE_WRITE_EXTRA_DATA
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 861141b..fcb3405 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -742,6 +742,7 @@ static int load_flat_file(struct linux_binprm * bprm,
* __start to address 4 so that is okay).
*/
if (rev > OLD_FLAT_VERSION) {
+ unsigned long persistent = 0;
for (i=0; i < relocs; i++) {
unsigned long addr, relval;
@@ -749,6 +750,8 @@ static int load_flat_file(struct linux_binprm * bprm,
relocated (of course, the address has to be
relocated first). */
relval = ntohl(reloc[i]);
+ if (flat_set_persistent (relval, &persistent))
+ continue;
addr = flat_get_relocate_addr(relval);
rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
if (rp == (unsigned long *)RELOC_FAILED) {
@@ -757,7 +760,8 @@ static int load_flat_file(struct linux_binprm * bprm,
}
/* Get the pointer's value. */
- addr = flat_get_addr_from_rp(rp, relval, flags);
+ addr = flat_get_addr_from_rp(rp, relval, flags,
+ &persistent);
if (addr != 0) {
/*
* Do the relocation. PIC relocs in the data section are
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 330fd3f..42e94b3 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -126,7 +126,9 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto _ret;
if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
- remove_arg_zero(bprm);
+ retval = remove_arg_zero(bprm);
+ if (retval)
+ goto _ret;
}
if (fmt->flags & MISC_FMT_OPEN_BINARY) {
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 304c885..4d0e0f6 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -67,7 +67,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
* This is done in reverse order, because of how the
* user environment and arguments are stored.
*/
- remove_arg_zero(bprm);
+ retval = remove_arg_zero(bprm);
+ if (retval)
+ return retval;
retval = copy_strings_kernel(1, &bprm->interp, bprm);
if (retval < 0) return retval;
bprm->argc++;
diff --git a/fs/bio.c b/fs/bio.c
index 33e4634..5f604f2 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
}
}
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
return bio->bi_phys_segments;
}
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
*/
int bio_get_nr_vecs(struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
return nr_pages;
}
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
/**
* bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
{
struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
/**
* bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @iov: the iovec.
* @iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
{
@@ -798,17 +798,13 @@ void bio_unmap_user(struct bio *bio)
bio_put(bio);
}
-static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+static void bio_map_kern_endio(struct bio *bio, int err)
{
- if (bio->bi_size)
- return 1;
-
bio_put(bio);
- return 0;
}
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
@@ -847,7 +843,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
/**
* bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
@@ -855,7 +851,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask)
{
struct bio *bio;
@@ -1002,34 +998,26 @@ void bio_check_pages_dirty(struct bio *bio)
/**
* bio_endio - end I/O on a bio
* @bio: bio
- * @bytes_done: number of bytes completed
* @error: error, if any
*
* Description:
- * bio_endio() will end I/O on @bytes_done number of bytes. This may be
- * just a partial part of the bio, or it may be the whole bio. bio_endio()
- * is the preferred way to end I/O on a bio, it takes care of decrementing
- * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
- * and one of the established -Exxxx (-EIO, for instance) error values in
- * case something went wrong. Noone should call bi_end_io() directly on
- * a bio unless they own it and thus know that it has an end_io function.
+ * bio_endio() will end I/O on the whole bio. bio_endio() is the
+ * preferred way to end I/O on a bio, it takes care of clearing
+ * BIO_UPTODATE on error. @error is 0 on success, and and one of the
+ * established -Exxxx (-EIO, for instance) error values in case
+ * something went wrong. Noone should call bi_end_io() directly on a
+ * bio unless they own it and thus know that it has an end_io
+ * function.
**/
-void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
-
- if (unlikely(bytes_done > bio->bi_size)) {
- printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
- bytes_done, bio->bi_size);
- bytes_done = bio->bi_size;
- }
-
- bio->bi_size -= bytes_done;
- bio->bi_sector += (bytes_done >> 9);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
if (bio->bi_end_io)
- bio->bi_end_io(bio, bytes_done, error);
+ bio->bi_end_io(bio, error);
}
void bio_pair_release(struct bio_pair *bp)
@@ -1037,37 +1025,29 @@ void bio_pair_release(struct bio_pair *bp)
if (atomic_dec_and_test(&bp->cnt)) {
struct bio *master = bp->bio1.bi_private;
- bio_endio(master, master->bi_size, bp->error);
+ bio_endio(master, bp->error);
mempool_free(bp, bp->bio2.bi_private);
}
}
-static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_1(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
-static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
+static void bio_pair_end_2(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
if (err)
bp->error = err;
- if (bi->bi_size)
- return 1;
-
bio_pair_release(bp);
- return 0;
}
/*
@@ -1187,7 +1167,7 @@ static void __init biovec_init_slabs(void)
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3635315e..6339a30 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -172,7 +172,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
}
#if 0
-static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+static void blk_end_aio(struct bio *bio, int error)
{
struct kiocb *iocb = bio->bi_private;
atomic_t *bio_count = &iocb->ki_bio_count;
@@ -517,7 +517,7 @@ void __init bdev_cache_init(void)
bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
- init_once, NULL);
+ init_once);
err = register_filesystem(&bd_type);
if (err)
panic("Cannot register bdev pseudo-fs");
diff --git a/fs/buffer.c b/fs/buffer.c
index 0f90067..75b51df 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2194,6 +2194,52 @@ int generic_commit_write(struct file *file, struct page *page,
return 0;
}
+/*
+ * block_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF. Because
+ * vmtruncate() writes the inode size before removing pages, once we have the
+ * page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+int
+block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+ get_block_t get_block)
+{
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ unsigned long end;
+ loff_t size;
+ int ret = -EINVAL;
+
+ lock_page(page);
+ size = i_size_read(inode);
+ if ((page->mapping != inode->i_mapping) ||
+ (page_offset(page) > size)) {
+ /* page got truncated out from underneath us */
+ goto out_unlock;
+ }
+
+ /* page is wholly or partially inside EOF */
+ if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
+ end = size & ~PAGE_CACHE_MASK;
+ else
+ end = PAGE_CACHE_SIZE;
+
+ ret = block_prepare_write(page, 0, end, get_block);
+ if (!ret)
+ ret = block_commit_write(page, 0, end);
+
+out_unlock:
+ unlock_page(page);
+ return ret;
+}
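
As a hedged illustration of how a filesystem might use the helper added above (this wiring is not part of the patch), the mmap write-fault path can simply forward to block_page_mkwrite() with the filesystem's own get_block routine; all myfs_ names are hypothetical.

    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    /* hypothetical block-mapping routine provided by the filesystem */
    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    {
            /* EOF checks, hole filling and ENOSPC handling happen in the helper */
            return block_page_mkwrite(vma, page, myfs_get_block);
    }

    static struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,        /* generic read-fault path */
            .page_mkwrite   = myfs_page_mkwrite,    /* run when a shared page is first written */
    };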
/*
* nobh_prepare_write()'s prereads are special: the buffer_heads are freed
@@ -2588,13 +2634,10 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
return tmp.b_blocknr;
}
-static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
+static void end_bio_bh_io_sync(struct bio *bio, int err)
{
struct buffer_head *bh = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
set_bit(BH_Eopnotsupp, &bh->b_state);
@@ -2602,7 +2645,6 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
- return 0;
}
int submit_bh(int rw, struct buffer_head * bh)
@@ -2977,6 +3019,7 @@ EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 164a45c..bbbf07b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -321,14 +321,13 @@ void unregister_chrdev_region(dev_t from, unsigned count)
}
}
-int unregister_chrdev(unsigned int major, const char *name)
+void unregister_chrdev(unsigned int major, const char *name)
{
struct char_device_struct *cd;
cd = __unregister_chrdev_region(major, 0, 256);
if (cd && cd->cdev)
cdev_del(cd->cdev);
kfree(cd);
- return 0;
}
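
Since unregister_chrdev() now returns void, its callers simply stop checking a return code. A minimal sketch of an affected module exit path, with illustrative names and a hypothetical major number:

    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/module.h>

    #define MYDEV_MAJOR 240         /* hypothetical locally assigned major */

    static void __exit mydev_exit(void)
    {
            unregister_chrdev(MYDEV_MAJOR, "mydev");        /* no return value left to check */
    }
    module_exit(mydev_exit);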
static DEFINE_SPINLOCK(cdev_lock);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index a9b6bc5..bed6215 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,13 @@
+Version 1.50
+------------
+Fix NTLMv2 signing. Exporting a cifs mount via an NFS server now works
+(if the cifs mount is done with the "serverino" mount option). Add support
+for POSIX Unlink (which helps with certain sharing violation cases when a
+server such as Samba supports the newer POSIX CIFS Protocol Extensions).
+Add the "nounix" mount option to allow disabling the CIFS Unix Extensions
+for just that mount. Fix hang on a spinlock in find_writable_file (a race
+when reopening a file after a session crash).
+
Version 1.49
------------
IPv6 support. Enable ipv6 addresses to be passed on mount (put the ipv6
@@ -8,7 +18,11 @@ when Unix Extensions were ignored). This allows users to override the
default uid and gid for files when they are certain that the uids or
gids on the server do not match those of the client. Make "sec=none"
mount override username (so that null user connection is attempted)
-to match what documentation said.
+to match what documentation said. Add support for very large reads (over
+127K) from some newer servers (such as Samba 3.0.26 and later); note that
+this also requires setting CIFSMaxBufSize to a larger value at module
+install time, which may hurt performance in some cases.
+Make the sign option force signing (or fail if the server does not support it).
Version 1.48
------------
diff --git a/fs/cifs/README b/fs/cifs/README
index 4d01697..b806b11 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -301,10 +301,21 @@ A partial list of the supported mount options follows:
during the local client kernel build will be used.
If server does not support Unicode, this parameter is
unused.
- rsize default read size (usually 16K)
- wsize default write size (usually 16K, 32K is often better over GigE)
- maximum wsize currently allowed by CIFS is 57344 (14 4096 byte
- pages)
+ rsize default read size (usually 16K). The client currently
+ cannot use an rsize larger than CIFSMaxBufSize. CIFSMaxBufSize
+ defaults to 16K and may be changed (from 8K up to the maximum
+ kmalloc size allowed by your kernel) at module install time
+ for cifs.ko. Setting CIFSMaxBufSize to a very large value
+ will cause cifs to use more memory and may reduce performance
+ in some cases. Using an rsize greater than 127K (the original
+ cifs protocol maximum) also requires that the server support
+ a new Unix Capability flag (for very large reads), which some
+ newer servers (e.g. Samba 3.0.26 or later) do. rsize can be
+ set from a minimum of 2048 to a maximum of 130048 (127K or
+ CIFSMaxBufSize, whichever is smaller).
+ wsize default write size (default 57344)
+ maximum wsize currently allowed by CIFS is 57344 (fourteen
+ 4096 byte pages)
rw mount the network share read-write (note that the
server may still consider the share read-only)
ro mount network share read-only
@@ -359,7 +370,7 @@ A partial list of the supported mount options follows:
Note that this does not affect the normal ACL check on the
target machine done by the server software (of the server
ACL against the user name provided at mount time).
- serverino Use servers inode numbers instead of generating automatically
+ serverino Use server's inode numbers instead of generating automatically
incrementing inode numbers on the client. Although this will
make it easier to spot hardlinked files (as they will have
the same inode numbers) and inode numbers may be persistent,
@@ -367,12 +378,11 @@ A partial list of the supported mount options follows:
are unique if multiple server side mounts are exported under a
single share (since inode numbers on the servers might not
be unique if multiple filesystems are mounted under the same
- shared higher level directory). Note that this requires that
- the server support the CIFS Unix Extensions as other servers
- do not return a unique IndexNumber on SMB FindFirst (most
- servers return zero as the IndexNumber). Parameter has no
- effect to Windows servers and others which do not support the
- CIFS Unix Extensions.
+ shared higher level directory). Note that some older servers
+ (e.g. pre-Windows 2000) do not support returning UniqueIDs
+ or the CIFS Unix Extensions equivalent, and for those servers
+ this mount option will have no effect. Exporting cifs mounts
+ under nfsd requires this mount option on the cifs mount.
noserverino Client generates inode numbers (rather than using the actual one
from the server) by default.
setuids If the CIFS Unix extensions are negotiated with the server
@@ -434,6 +444,13 @@ A partial list of the supported mount options follows:
noposixpaths If CIFS Unix extensions are supported, do not request
posix path name support (this may cause servers to
 reject creating files with certain reserved characters).
+ nounix Disable the CIFS Unix Extensions for this mount (tree
+ connection). This is rarely needed, but it may be useful
+ in order to turn off multiple settings all at once (i.e.
+ posix acls, posix locks, posix paths, symlink support
+ and retrieving uids/gids/mode from the server) or to
+ work around a bug in a server which implements the Unix
+ Extensions.
nobrl Do not send byte range lock requests to the server.
This is necessary for certain applications that break
with cifs style mandatory byte range locks (and most
@@ -441,6 +458,12 @@ A partial list of the supported mount options follows:
byte range locks).
remount remount the share (often used to change from ro to rw mounts
or vice versa)
+ servern Specify the server's netbios name (RFC1001 name) to use
+ when attempting to set up a session to the server.
+ This is needed for mounting to some older servers (such
+ as OS/2 or Windows 98 and Windows ME) since they do not
+ support a default server name. A server name can be up
+ to 15 characters long and is usually uppercased.
sfu When the CIFS Unix Extensions are not negotiated, attempt to
create device files and fifos in a format compatible with
Services for Unix (SFU). In addition retrieve bits 10-12
@@ -582,10 +605,10 @@ the start of smb requests and responses can be enabled via:
echo 1 > /proc/fs/cifs/traceSMB
-Two other experimental features are under development and to test
-require enabling CONFIG_CIFS_EXPERIMENTAL
+Two other experimental features are under development. Testing them
+requires enabling CONFIG_CIFS_EXPERIMENTAL
- More efficient write operations
+ ipv6 enablement
DNOTIFY fcntl: needed for support of directory change
notification and perhaps later for file leases)
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 78b620e..29d4b27 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -18,9 +18,9 @@ better)
d) Kerberos/SPNEGO session setup support - (started)
-e) More testing of NTLMv2 authentication (mostly implemented - double check
-that NTLMv2 signing works, also need to cleanup now unneeded SessSetup code in
-fs/cifs/connect.c)
+e) Clean up the now unneeded SessSetup code in
+fs/cifs/connect.c and add back in the NTLMSSP code if any servers
+need it
f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup
 used (Kerberos or NTLMSSP). Signing already implemented for NTLM
@@ -82,8 +82,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
v) mount check for unmatched uids
-w) Add mount option for Linux extension disable per mount, and partial
-disable per mount (uid off, symlink/fifo/mknod on but what about posix acls?)
+w) Add support for new vfs entry points for setlease and fallocate
x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
processes can proceed better in parallel (on the server)
@@ -106,6 +105,12 @@ but recognizes them
succeed but still return access denied (appears to be Windows
server not cifs client problem) and has not been reproduced recently.
NTFS partitions do not have this problem.
+4) Unix/POSIX capabilities are reset after reconnection, and affect
+a few fields in the tree connection, but we do not know which
+superblocks to apply these changes to. We should probably walk
+the list of superblocks to set these. We also need to check the
+flags on the second mount to the same share, and see if we
+can do the same trick that NFS does to remount duplicate shares.
Misc testing to do
==================
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 2e75883..f50a88d 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -1,7 +1,7 @@
-/*
+/*
 * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
* turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
- *
+ *
* Copyright (c) 2000 RP Internet (www.rpi.net.au).
*
* This program is free software; you can redistribute it and/or modify
@@ -80,7 +80,7 @@
static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
-/*
+/*
* ASN.1 context.
*/
struct asn1_ctx {
@@ -190,7 +190,7 @@ asn1_header_decode(struct asn1_ctx *ctx,
unsigned char **eoc,
unsigned int *cls, unsigned int *con, unsigned int *tag)
{
- unsigned int def = 0;
+ unsigned int def = 0;
unsigned int len = 0;
if (!asn1_id_decode(ctx, cls, con, tag))
@@ -331,7 +331,7 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
*integer |= ch;
}
return 1;
-}
+}
static unsigned char
asn1_octets_decode(struct asn1_ctx *ctx,
@@ -376,7 +376,7 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
return 1;
}
-static int
+static int
asn1_oid_decode(struct asn1_ctx *ctx,
unsigned char *eoc, unsigned long **oid, unsigned int *len)
{
@@ -459,7 +459,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
unsigned int cls, con, tag, oidlen, rc;
int use_ntlmssp = FALSE;
- *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default */
+ *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
@@ -498,7 +498,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 0",
+ cFYI(1,
+ ("cls = %d con = %d tag = %d end = %p (%d) exit 0",
cls, con, tag, end, *end));
return 0;
}
@@ -508,7 +509,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cFYI(1,("cls = %d con = %d tag = %d end = %p (%d) exit 1",
+ cFYI(1,
+ ("cls = %d con = %d tag = %d end = %p (%d) exit 1",
cls, con, tag, end, *end));
return 0;
}
@@ -540,32 +542,34 @@ decode_negTokenInit(unsigned char *security_blob, int length,
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
cFYI(1,
- ("Error 1 decoding negTokenInit header exit 2"));
+ ("Error decoding negTokenInit hdr exit2"));
return 0;
}
if ((tag == ASN1_OJI) && (con == ASN1_PRI)) {
rc = asn1_oid_decode(&ctx, end, &oid, &oidlen);
- if(rc) {
+ if (rc) {
cFYI(1,
- ("OID len = %d oid = 0x%lx 0x%lx 0x%lx 0x%lx",
- oidlen, *oid, *(oid + 1), *(oid + 2),
- *(oid + 3)));
- rc = compare_oid(oid, oidlen, NTLMSSP_OID,
- NTLMSSP_OID_LEN);
+ ("OID len = %d oid = 0x%lx 0x%lx "
+ "0x%lx 0x%lx",
+ oidlen, *oid, *(oid + 1),
+ *(oid + 2), *(oid + 3)));
+ rc = compare_oid(oid, oidlen,
+ NTLMSSP_OID, NTLMSSP_OID_LEN);
kfree(oid);
if (rc)
use_ntlmssp = TRUE;
}
} else {
- cFYI(1,("This should be an oid what is going on? "));
+ cFYI(1, ("Should be an oid what is going on?"));
}
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1,
- ("Error decoding last part of negTokenInit exit 3"));
+ ("Error decoding last part negTokenInit exit3"));
return 0;
- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { /* tag = 3 indicating mechListMIC */
+ } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+ /* tag = 3 indicating mechListMIC */
cFYI(1,
("Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
cls, con, tag, end, *end));
@@ -573,7 +577,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1,
- ("Error decoding last part of negTokenInit exit 5"));
+ ("Error decoding last part negTokenInit exit5"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
@@ -584,7 +588,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1,
- ("Error decoding last part of negTokenInit exit 7"));
+ ("Error decoding last part negTokenInit exit 7"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
cFYI(1,
@@ -594,20 +598,21 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1,
- ("Error decoding last part of negTokenInit exit 9"));
+ ("Error decoding last part negTokenInit exit9"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
|| (tag != ASN1_GENSTR)) {
cFYI(1,
- ("Exit 10 cls = %d con = %d tag = %d end = %p (%d)",
+ ("Exit10 cls = %d con = %d tag = %d end = %p (%d)",
cls, con, tag, end, *end));
return 0;
}
- cFYI(1, ("Need to call asn1_octets_decode() function for this %s", ctx.pointer)); /* is this UTF-8 or ASCII? */
+ cFYI(1, ("Need to call asn1_octets_decode() function for %s",
+ ctx.pointer)); /* is this UTF-8 or ASCII? */
}
- /* if (use_kerberos)
- *secType = Kerberos
+ /* if (use_kerberos)
+ *secType = Kerberos
else */
if (use_ntlmssp) {
*secType = NTLMSSP;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 07838b2..1bf8cf5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -58,7 +58,7 @@ cifs_dump_mem(char *label, void *data, int length)
}
#ifdef CONFIG_CIFS_DEBUG2
-void cifs_dump_detail(struct smb_hdr * smb)
+void cifs_dump_detail(struct smb_hdr *smb)
{
cERROR(1, ("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
@@ -67,10 +67,10 @@ void cifs_dump_detail(struct smb_hdr * smb)
}
-void cifs_dump_mids(struct TCP_Server_Info * server)
+void cifs_dump_mids(struct TCP_Server_Info *server)
{
struct list_head *tmp;
- struct mid_q_entry * mid_entry;
+ struct mid_q_entry *mid_entry;
if (server == NULL)
return;
@@ -114,12 +114,12 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
{
struct list_head *tmp;
struct list_head *tmp1;
- struct mid_q_entry * mid_entry;
+ struct mid_q_entry *mid_entry;
struct cifsSesInfo *ses;
struct cifsTconInfo *tcon;
int i;
int length = 0;
- char * original_buf = buf;
+ char *original_buf = buf;
*beginBuffer = buf + offset;
@@ -145,7 +145,6 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
(ses->serverNOS == NULL)) {
buf += sprintf(buf, "\nentry for %s not fully "
"displayed\n\t", ses->serverName);
-
} else {
length =
sprintf(buf,
@@ -901,90 +900,14 @@ security_flags_write(struct file *file, const char __user *buffer,
}
/* flags look ok - update the global security flags for cifs module */
extended_security = flags;
+ if (extended_security & CIFSSEC_MUST_SIGN) {
+ /* requiring signing implies signing is allowed */
+ extended_security |= CIFSSEC_MAY_SIGN;
+ cFYI(1, ("packet signing now required"));
+ } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
+ cFYI(1, ("packet signing disabled"));
+ }
+ /* BB should we turn on MAY flags for other MUST options? */
return count;
}
-
-/* static int
-ntlmv2_enabled_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
- len = sprintf(page, "%d\n", ntlmv2_support);
-
- len -= off;
- *start = page + off;
-
- if (len > count)
- len = count;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
-}
-static int
-ntlmv2_enabled_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- ntlmv2_support = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- ntlmv2_support = 1;
- else if (c == '2')
- ntlmv2_support = 2;
-
- return count;
-}
-
-static int
-packet_signing_enabled_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
- len = sprintf(page, "%d\n", sign_CIFS_PDUs);
-
- len -= off;
- *start = page + off;
-
- if (len > count)
- len = count;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
-}
-static int
-packet_signing_enabled_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- sign_CIFS_PDUs = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- sign_CIFS_PDUs = 1;
- else if (c == '2')
- sign_CIFS_PDUs = 2;
-
- return count;
-} */
-
-
#endif
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4cc2012e..34af556 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -43,6 +43,6 @@ struct cifs_sb_info {
mode_t mnt_dir_mode;
int mnt_cifs_flags;
int prepathlen;
- char * prepath;
+ char *prepath;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 701e9a9..b5903b8 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -66,7 +66,7 @@ cifs_strtoUCS(__le16 * to, const char *from, int len,
{
int charlen;
int i;
- wchar_t * wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+ wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 39e5b97..614c11f 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -5,20 +5,20 @@
* Convert a unicode character to upper or lower case using
* compressed tables.
*
- * Copyright (c) International Business Machines Corp., 2000,2005
+ * Copyright (c) International Business Machines Corp., 2000,2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
+ * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
@@ -70,7 +70,7 @@ int cifs_strtoUCS(__le16 *, const char *, int, const struct nls_table *);
* Address of the first string
*/
static inline wchar_t *
-UniStrcat(wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcat(wchar_t *ucs1, const wchar_t *ucs2)
{
wchar_t *anchor = ucs1; /* save a pointer to start of ucs1 */
@@ -88,7 +88,7 @@ UniStrcat(wchar_t * ucs1, const wchar_t * ucs2)
* or NULL if the character is not in the string
*/
static inline wchar_t *
-UniStrchr(const wchar_t * ucs, wchar_t uc)
+UniStrchr(const wchar_t *ucs, wchar_t uc)
{
while ((*ucs != uc) && *ucs)
ucs++;
@@ -107,7 +107,7 @@ UniStrchr(const wchar_t * ucs, wchar_t uc)
* > 0: First string is greater than second
*/
static inline int
-UniStrcmp(const wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
{
while ((*ucs1 == *ucs2) && *ucs1) {
ucs1++;
@@ -120,7 +120,7 @@ UniStrcmp(const wchar_t * ucs1, const wchar_t * ucs2)
* UniStrcpy: Copy a string
*/
static inline wchar_t *
-UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
+UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
{
wchar_t *anchor = ucs1; /* save the start of result string */
@@ -132,7 +132,7 @@ UniStrcpy(wchar_t * ucs1, const wchar_t * ucs2)
* UniStrlen: Return the length of a string (in 16 bit Unicode chars not bytes)
*/
static inline size_t
-UniStrlen(const wchar_t * ucs1)
+UniStrlen(const wchar_t *ucs1)
{
int i = 0;
@@ -142,10 +142,11 @@ UniStrlen(const wchar_t * ucs1)
}
/*
- * UniStrnlen: Return the length (in 16 bit Unicode chars not bytes) of a string (length limited)
+ * UniStrnlen: Return the length (in 16 bit Unicode chars not bytes) of a
+ * string (length limited)
*/
static inline size_t
-UniStrnlen(const wchar_t * ucs1, int maxlen)
+UniStrnlen(const wchar_t *ucs1, int maxlen)
{
int i = 0;
@@ -161,7 +162,7 @@ UniStrnlen(const wchar_t * ucs1, int maxlen)
* UniStrncat: Concatenate length limited string
*/
static inline wchar_t *
-UniStrncat(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
{
wchar_t *anchor = ucs1; /* save pointer to string 1 */
@@ -179,7 +180,7 @@ UniStrncat(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
* UniStrncmp: Compare length limited string
*/
static inline int
-UniStrncmp(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
{
if (!n)
return 0; /* Null strings are equal */
@@ -194,7 +195,7 @@ UniStrncmp(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
* UniStrncmp_le: Compare length limited string - native to little-endian
*/
static inline int
-UniStrncmp_le(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
{
if (!n)
return 0; /* Null strings are equal */
@@ -209,7 +210,7 @@ UniStrncmp_le(const wchar_t * ucs1, const wchar_t * ucs2, size_t n)
* UniStrncpy: Copy length limited string with pad
*/
static inline wchar_t *
-UniStrncpy(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
{
wchar_t *anchor = ucs1;
@@ -226,7 +227,7 @@ UniStrncpy(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
* UniStrncpy_le: Copy length limited string with pad to little-endian
*/
static inline wchar_t *
-UniStrncpy_le(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
+UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
{
wchar_t *anchor = ucs1;
@@ -247,7 +248,7 @@ UniStrncpy_le(wchar_t * ucs1, const wchar_t * ucs2, size_t n)
* NULL if no matching string is found
*/
static inline wchar_t *
-UniStrstr(const wchar_t * ucs1, const wchar_t * ucs2)
+UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
{
const wchar_t *anchor1 = ucs1;
const wchar_t *anchor2 = ucs2;
@@ -297,7 +298,7 @@ UniToupper(register wchar_t uc)
* UniStrupr: Upper case a unicode string
*/
static inline wchar_t *
-UniStrupr(register wchar_t * upin)
+UniStrupr(register wchar_t *upin)
{
register wchar_t *up;
@@ -338,7 +339,7 @@ UniTolower(wchar_t uc)
* UniStrlwr: Lower case a unicode string
*/
static inline wchar_t *
-UniStrlwr(register wchar_t * upin)
+UniStrlwr(register wchar_t *upin)
{
register wchar_t *up;
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
index da2ad5b..18a9d97 100644
--- a/fs/cifs/cifs_uniupr.h
+++ b/fs/cifs/cifs_uniupr.h
@@ -3,16 +3,16 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
+ * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* uniupr.h - Unicode compressed case ranges
@@ -53,7 +53,7 @@ signed char CifsUniUpperTable[512] = {
0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, /* 1a0-1af */
-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, /* 1b0-1bf */
0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0, /* 1c0-1cf */
- -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
+ -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e0-1ef */
0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1, /* 1f0-1ff */
};
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index fdeda51..3627229 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -21,7 +21,7 @@
#include <linux/fs.h>
#include "cifspdu.h"
-#include "cifsglob.h"
+#include "cifsglob.h"
#include "cifs_debug.h"
#include "md5.h"
#include "cifs_unicode.h"
@@ -29,54 +29,57 @@
#include <linux/ctype.h>
#include <linux/random.h>
-/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
+/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
/* the 16 byte signature must be allocated by the caller */
/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
+/* Note that the smb header signature field on input contains the
sequence number before this function is called */
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
- unsigned char *p24);
-
-static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
- const char * key, char * signature)
+ unsigned char *p24);
+
+static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
+ const struct mac_key *key, char *signature)
{
struct MD5Context context;
- if((cifs_pdu == NULL) || (signature == NULL))
+ if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
- MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
- MD5Final(signature,&context);
+ MD5Update(&context, (char *)&key->data, key->len);
+ MD5Update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+
+ MD5Final(signature, &context);
return 0;
}
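
The rewritten helper above hashes key->data for key->len bytes rather than a fixed 40-byte char array. For orientation only, an approximate reconstruction of the mac_key container this code assumes is sketched below; the authoritative definition lives in cifsglob.h elsewhere in this series, so treat the exact layout as an assumption.

    /* approximate shape of the signing-key container used above */
    struct mac_key {
            unsigned int len;                       /* number of valid bytes in data */
            union {
                    char ntlm[CIFS_SESS_KEY_SIZE + 16];     /* NTLM: hashed password + session key */
                    struct {
                            char key[16];                   /* HMAC-MD5 of the NTLMv2 response */
                            struct ntlmv2_resp resp;        /* response blob appended for signing */
                    } ntlmv2;
            } data;
    };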
-int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
- __u32 * pexpected_response_sequence_number)
+int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
+ __u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
- if((cifs_pdu == NULL) || (server == NULL))
+ if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
spin_lock(&GlobalMid_Lock);
- cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(server->sequence_number);
+ cifs_pdu->Signature.Sequence.SequenceNumber =
+ cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
-
+
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
- rc = cifs_calculate_signature(cifs_pdu, server->mac_signing_key,smb_signature);
- if(rc)
+ rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+ smb_signature);
+ if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
@@ -84,115 +87,119 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
return rc;
}
-static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
- const char * key, char * signature)
+static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
+ const struct mac_key *key, char *signature)
{
struct MD5Context context;
int i;
- if((iov == NULL) || (signature == NULL))
+ if ((iov == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
- for(i=0;i<n_vec;i++) {
- if(iov[i].iov_base == NULL) {
- cERROR(1,("null iovec entry"));
+ MD5Update(&context, (char *)&key->data, key->len);
+ for (i = 0; i < n_vec; i++) {
+ if (iov[i].iov_base == NULL) {
+ cERROR(1, ("null iovec entry"));
return -EIO;
- } else if(iov[i].iov_len == 0)
+ } else if (iov[i].iov_len == 0)
break; /* bail out if we are sent nothing to sign */
- /* The first entry includes a length field (which does not get
+ /* The first entry includes a length field (which does not get
 signed) that occupies the first 4 bytes before the header */
- if(i==0) {
+ if (i == 0) {
if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
- MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
+ MD5Update(&context, iov[0].iov_base+4,
+ iov[0].iov_len-4);
} else
- MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
+ MD5Update(&context, iov[i].iov_base, iov[i].iov_len);
}
- MD5Final(signature,&context);
+ MD5Final(signature, &context);
return 0;
}
-int cifs_sign_smb2(struct kvec * iov, int n_vec, struct TCP_Server_Info *server,
+int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
__u32 * pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
- struct smb_hdr * cifs_pdu = iov[0].iov_base;
+ struct smb_hdr *cifs_pdu = iov[0].iov_base;
- if((cifs_pdu == NULL) || (server == NULL))
+ if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
- spin_lock(&GlobalMid_Lock);
- cifs_pdu->Signature.Sequence.SequenceNumber =
+ spin_lock(&GlobalMid_Lock);
+ cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
- cifs_pdu->Signature.Sequence.Reserved = 0;
+ cifs_pdu->Signature.Sequence.Reserved = 0;
- *pexpected_response_sequence_number = server->sequence_number++;
- server->sequence_number++;
- spin_unlock(&GlobalMid_Lock);
+ *pexpected_response_sequence_number = server->sequence_number++;
+ server->sequence_number++;
+ spin_unlock(&GlobalMid_Lock);
- rc = cifs_calc_signature2(iov, n_vec, server->mac_signing_key,
+ rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
smb_signature);
- if(rc)
- memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
- else
- memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
-
- return rc;
+ if (rc)
+ memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
+ else
+ memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
+ return rc;
}
-int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
- __u32 expected_sequence_number)
+int cifs_verify_signature(struct smb_hdr *cifs_pdu,
+ const struct mac_key *mac_key,
+ __u32 expected_sequence_number)
{
unsigned int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
- if((cifs_pdu == NULL) || (mac_key == NULL))
+ if ((cifs_pdu == NULL) || (mac_key == NULL))
return -EINVAL;
if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
return 0;
if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
- struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)cifs_pdu;
- if(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
+ struct smb_com_lock_req *pSMB =
+ (struct smb_com_lock_req *)cifs_pdu;
+ if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
return 0;
}
- /* BB what if signatures are supposed to be on for session but server does not
- send one? BB */
-
+ /* BB what if signatures are supposed to be on for session but
+ server does not send one? BB */
+
/* Do not need to verify session setups with signature "BSRSPYL " */
- if(memcmp(cifs_pdu->Signature.SecuritySignature,"BSRSPYL ",8)==0)
- cFYI(1,("dummy signature received for smb command 0x%x",cifs_pdu->Command));
+ if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
+ cFYI(1, ("dummy signature received for smb command 0x%x",
+ cifs_pdu->Command));
 /* save off the original signature so we can modify the smb and check
its signature against what the server sent */
- memcpy(server_response_sig,cifs_pdu->Signature.SecuritySignature,8);
+ memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
- cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(expected_sequence_number);
+ cifs_pdu->Signature.Sequence.SequenceNumber =
+ cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
rc = cifs_calculate_signature(cifs_pdu, mac_key,
what_we_think_sig_should_be);
- if(rc)
+ if (rc)
return rc;
-
-/* cifs_dump_mem("what we think it should be: ",what_we_think_sig_should_be,16); */
+/* cifs_dump_mem("what we think it should be: ",
+ what_we_think_sig_should_be, 16); */
- if(memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+ if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
return -EACCES;
else
return 0;
@@ -200,89 +207,94 @@ int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
}
/* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
+int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+ const char *password)
{
char temp_key[16];
if ((key == NULL) || (rn == NULL))
return -EINVAL;
E_md4hash(password, temp_key);
- mdfour(key,temp_key,16);
- memcpy(key+16,rn, CIFS_SESS_KEY_SIZE);
+ mdfour(key->data.ntlm, temp_key, 16);
+ memcpy(key->data.ntlm+16, rn, CIFS_SESS_KEY_SIZE);
+ key->len = 40;
return 0;
}
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses,
- const struct nls_table * nls_info)
+int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
+ const struct nls_table *nls_info)
{
char temp_hash[16];
struct HMACMD5Context ctx;
- char * ucase_buf;
- __le16 * unicode_buf;
- unsigned int i,user_name_len,dom_name_len;
+ char *ucase_buf;
+ __le16 *unicode_buf;
+ unsigned int i, user_name_len, dom_name_len;
- if(ses == NULL)
+ if (ses == NULL)
return -EINVAL;
E_md4hash(ses->password, temp_hash);
hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
user_name_len = strlen(ses->userName);
- if(user_name_len > MAX_USERNAME_SIZE)
+ if (user_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
- if(ses->domainName == NULL)
+ if (ses->domainName == NULL)
return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
dom_name_len = strlen(ses->domainName);
- if(dom_name_len > MAX_USERNAME_SIZE)
+ if (dom_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
-
+
ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
- if(ucase_buf == NULL)
+ if (ucase_buf == NULL)
return -ENOMEM;
unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
- if(unicode_buf == NULL) {
+ if (unicode_buf == NULL) {
kfree(ucase_buf);
return -ENOMEM;
}
-
- for(i=0;i<user_name_len;i++)
+
+ for (i = 0; i < user_name_len; i++)
ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
ucase_buf[i] = 0;
- user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+ user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
+ MAX_USERNAME_SIZE*2, nls_info);
unicode_buf[user_name_len] = 0;
user_name_len++;
- for(i=0;i<dom_name_len;i++)
+ for (i = 0; i < dom_name_len; i++)
ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
ucase_buf[i] = 0;
- dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf, MAX_USERNAME_SIZE*2, nls_info);
+ dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
+ MAX_USERNAME_SIZE*2, nls_info);
unicode_buf[user_name_len + dom_name_len] = 0;
hmac_md5_update((const unsigned char *) unicode_buf,
- (user_name_len+dom_name_len)*2,&ctx);
+ (user_name_len+dom_name_len)*2, &ctx);
- hmac_md5_final(ses->server->mac_signing_key,&ctx);
+ hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
kfree(ucase_buf);
kfree(unicode_buf);
return 0;
}
#ifdef CONFIG_CIFS_WEAK_PW_HASH
-void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
+void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key)
{
int i;
char password_with_pad[CIFS_ENCPWD_SIZE];
- if(ses->server == NULL)
+ if (ses->server == NULL)
return;
memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
- if(ses->password)
+ if (ses->password)
strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
- if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
- if(extended_security & CIFSSEC_MAY_PLNTXT) {
- memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE);
+ if ((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if (extended_security & CIFSSEC_MAY_PLNTXT) {
+ memcpy(lnm_session_key, password_with_pad,
+ CIFS_ENCPWD_SIZE);
return;
}
@@ -297,7 +309,7 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
utf8 and other multibyte codepages each need their own strupper
 function since a byte at a time will not work. */
- for(i = 0; i < CIFS_ENCPWD_SIZE; i++) {
+ for (i = 0; i < CIFS_ENCPWD_SIZE; i++) {
password_with_pad[i] = toupper(password_with_pad[i]);
}
@@ -307,19 +319,19 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
}
#endif /* CIFS_WEAK_PW_HASH */
-static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
- const struct nls_table * nls_cp)
+static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
int rc = 0;
int len;
char nt_hash[16];
- struct HMACMD5Context * pctxt;
- wchar_t * user;
- wchar_t * domain;
+ struct HMACMD5Context *pctxt;
+ wchar_t *user;
+ wchar_t *domain;
pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
- if(pctxt == NULL)
+ if (pctxt == NULL)
return -ENOMEM;
/* calculate md4 hash of password */
@@ -331,41 +343,45 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
/* convert ses->userName to unicode and uppercase */
len = strlen(ses->userName);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
- if(user == NULL)
+ if (user == NULL)
goto calc_exit_2;
len = cifs_strtoUCS(user, ses->userName, len, nls_cp);
UniStrupr(user);
hmac_md5_update((char *)user, 2*len, pctxt);
/* convert ses->domainName to unicode and uppercase */
- if(ses->domainName) {
+ if (ses->domainName) {
len = strlen(ses->domainName);
- domain = kmalloc(2 + (len * 2), GFP_KERNEL);
- if(domain == NULL)
+ domain = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if (domain == NULL)
goto calc_exit_1;
len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp);
- UniStrupr(domain);
+ /* the following line was removed since it didn't work well
+ with a lower-cased domain name passed as an option.
+ Maybe converting the domain name earlier makes sense */
+ /* UniStrupr(domain); */
hmac_md5_update((char *)domain, 2*len, pctxt);
-
+
kfree(domain);
}
calc_exit_1:
kfree(user);
calc_exit_2:
- /* BB FIXME what about bytes 24 through 40 of the signing key?
+ /* BB FIXME what about bytes 24 through 40 of the signing key?
compare with the NTLM example */
- hmac_md5_final(ses->server->mac_signing_key, pctxt);
+ hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
return rc;
}
-void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf,
- const struct nls_table * nls_cp)
+void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+ const struct nls_table *nls_cp)
{
int rc;
- struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf;
+ struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
+ struct HMACMD5Context context;
buf->blob_signature = cpu_to_le32(0x00000101);
buf->reserved = 0;
@@ -379,21 +395,31 @@ void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf,
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
- if(rc)
- cERROR(1,("could not get v2 hash rc %d",rc));
+ if (rc)
+ cERROR(1, ("could not get v2 hash rc %d", rc));
CalcNTLMv2_response(ses, resp_buf);
+
+ /* now calculate the MAC key for NTLMv2 */
+ hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+ hmac_md5_update(resp_buf, 16, &context);
+ hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+
+ memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+ sizeof(struct ntlmv2_resp));
+ ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
}
-void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response)
+void CalcNTLMv2_response(const struct cifsSesInfo *ses,
+ char *v2_session_response)
{
struct HMACMD5Context context;
/* rest of v2 struct already generated */
- memcpy(v2_session_response + 8, ses->server->cryptKey,8);
- hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context);
+ memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
+ hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
- hmac_md5_update(v2_session_response+8,
+ hmac_md5_update(v2_session_response+8,
sizeof(struct ntlmv2_resp) - 8, &context);
- hmac_md5_final(v2_session_response,&context);
+ hmac_md5_final(v2_session_response, &context);
/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index bd0f2f2..cabb6a5 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,23 +64,27 @@ unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct * oplockThread; /* remove sparse warning */
-struct task_struct * oplockThread = NULL;
+extern struct task_struct *oplockThread; /* remove sparse warning */
+struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
-static struct task_struct * dnotifyThread = NULL;
+static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
-MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
+MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
+ "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
-MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
+MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
+ "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
-MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
+MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
+ "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
-MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
+MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
+ "Default: 50 Range: 2 to 256");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
@@ -95,10 +99,10 @@ cifs_read_super(struct super_block *sb, void *data,
struct inode *inode;
struct cifs_sb_info *cifs_sb;
int rc = 0;
-
+
/* BB should we make this contingent on mount parm? */
sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
- sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
+ sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
cifs_sb = CIFS_SB(sb);
if (cifs_sb == NULL)
return -ENOMEM;
@@ -114,12 +118,9 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (experimEnabled != 0)
- sb->s_export_op = &cifs_export_ops;
-#endif /* EXPERIMENTAL */
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
- sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
+ sb->s_blocksize =
+ cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
sb->s_qcop = &cifs_quotactl_ops;
#endif
@@ -139,6 +140,13 @@ cifs_read_super(struct super_block *sb, void *data,
goto out_no_root;
}
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ cFYI(1, ("export ops supported"));
+ sb->s_export_op = &cifs_export_ops;
+ }
+#endif /* EXPERIMENTAL */
+
return 0;
out_no_root:
@@ -149,7 +157,7 @@ out_no_root:
out_mount_failed:
if (cifs_sb) {
if (cifs_sb->local_nls)
- unload_nls(cifs_sb->local_nls);
+ unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
return rc;
@@ -164,10 +172,10 @@ cifs_put_super(struct super_block *sb)
cFYI(1, ("In cifs_put_super"));
cifs_sb = CIFS_SB(sb);
if (cifs_sb == NULL) {
- cFYI(1,("Empty cifs superblock info passed to unmount"));
+ cFYI(1, ("Empty cifs superblock info passed to unmount"));
return;
}
- rc = cifs_umount(sb, cifs_sb);
+ rc = cifs_umount(sb, cifs_sb);
if (rc) {
cERROR(1, ("cifs_umount failed with return code %d", rc));
}
@@ -180,7 +188,7 @@ static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- int xid;
+ int xid;
int rc = -EOPNOTSUPP;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
@@ -193,7 +201,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = CIFS_MAGIC_NUMBER;
/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
- buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
+ buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
presumably be total path, but note
 that some servers (including Samba 3)
have a shorter maximum path */
@@ -217,8 +225,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
bypassed it because we detected that this was an older LANMAN sess */
if (rc)
rc = SMBOldQFSInfo(xid, pTcon, buf);
- /*
- int f_type;
+ /* int f_type;
__fsid_t f_fsid;
int f_namelen; */
/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
@@ -227,7 +234,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
longer available? */
}
-static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
+static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
struct cifs_sb_info *cifs_sb;
@@ -235,10 +242,10 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
return 0;
- } else /* file mode might have been restricted at mount time
- on the client (above and beyond ACL on servers) for
+ } else /* file mode might have been restricted at mount time
+ on the client (above and beyond ACL on servers) for
servers which do not support setting and viewing mode bits,
- so allowing client to check permissions is useful */
+ so allowing client to check permissions is useful */
return generic_permission(inode, mask, NULL);
}
@@ -267,7 +274,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->clientCanCacheRead = FALSE;
cifs_inode->clientCanCacheAll = FALSE;
cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
-
+
/* Can not set i_flags here - they get immediately overwritten
to zero by the VFS */
/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
@@ -309,26 +316,26 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_printf(s, ",posixpaths");
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
- !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
+ !(cifs_sb->tcon->unix_ext))
seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
- !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
+ !(cifs_sb->tcon->unix_ext))
seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
- seq_printf(s, ",rsize=%d",cifs_sb->rsize);
- seq_printf(s, ",wsize=%d",cifs_sb->wsize);
+ seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+ seq_printf(s, ",wsize=%d", cifs_sb->wsize);
}
return 0;
}
#ifdef CONFIG_CIFS_QUOTA
-int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
- struct fs_disk_quota * pdquota)
+int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
+ struct fs_disk_quota *pdquota)
{
int xid;
int rc = 0;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
-
+
if (cifs_sb)
pTcon = cifs_sb->tcon;
else
@@ -337,7 +344,7 @@ int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
xid = GetXid();
if (pTcon) {
- cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+ cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
} else {
return -EIO;
}
@@ -346,8 +353,8 @@ int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
return rc;
}
-int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
- struct fs_disk_quota * pdquota)
+int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
+ struct fs_disk_quota *pdquota)
{
int xid;
int rc = 0;
@@ -361,7 +368,7 @@ int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
xid = GetXid();
if (pTcon) {
- cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
+ cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
} else {
rc = -EIO;
}
@@ -370,9 +377,9 @@ int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
return rc;
}
-int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
+int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
- int xid;
+ int xid;
int rc = 0;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *pTcon;
@@ -384,7 +391,7 @@ int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
xid = GetXid();
if (pTcon) {
- cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
+ cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
} else {
rc = -EIO;
}
@@ -393,7 +400,7 @@ int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
return rc;
}
-int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
+int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
int xid;
int rc = 0;
@@ -407,7 +414,7 @@ int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
}
xid = GetXid();
if (pTcon) {
- cFYI(1,("pqstats %p",qstats));
+ cFYI(1, ("pqstats %p", qstats));
} else {
rc = -EIO;
}
@@ -424,10 +431,10 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif
-static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
+static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo * tcon;
+ struct cifsTconInfo *tcon;
if (!(flags & MNT_FORCE))
return;
@@ -445,9 +452,8 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
- if (tcon->ses && tcon->ses->server)
- {
- cFYI(1,("wake up tasks now - umount begin not complete"));
+ if (tcon->ses && tcon->ses->server) {
+ cFYI(1, ("wake up tasks now - umount begin not complete"));
wake_up_all(&tcon->ses->server->request_q);
wake_up_all(&tcon->ses->server->response_q);
msleep(1); /* yield */
@@ -480,10 +486,11 @@ static const struct super_operations cifs_super_ops = {
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
.destroy_inode = cifs_destroy_inode,
-/* .drop_inode = generic_delete_inode,
- .delete_inode = cifs_delete_inode, *//* Do not need the above two functions
- unless later we add lazy close of inodes or unless the kernel forgets to call
- us with the same number of releases (closes) as opens */
+/* .drop_inode = generic_delete_inode,
+ .delete_inode = cifs_delete_inode, */ /* Do not need above two
+ functions unless later we add lazy close of inodes or unless the
+ kernel forgets to call us with the same number of releases (closes)
+ as opens */
.show_options = cifs_show_options,
.umount_begin = cifs_umount_begin,
.remount_fs = cifs_remount,
@@ -586,11 +593,11 @@ const struct inode_operations cifs_file_inode_ops = {
.getxattr = cifs_getxattr,
.listxattr = cifs_listxattr,
.removexattr = cifs_removexattr,
-#endif
+#endif
};
const struct inode_operations cifs_symlink_inode_ops = {
- .readlink = generic_readlink,
+ .readlink = generic_readlink,
.follow_link = cifs_follow_link,
.put_link = cifs_put_link,
.permission = cifs_permission,
@@ -602,7 +609,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
.getxattr = cifs_getxattr,
.listxattr = cifs_listxattr,
.removexattr = cifs_removexattr,
-#endif
+#endif
};
const struct file_operations cifs_file_ops = {
@@ -628,7 +635,7 @@ const struct file_operations cifs_file_ops = {
};
const struct file_operations cifs_file_direct_ops = {
- /* no mmap, no aio, no readv -
+ /* no mmap, no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
.read = cifs_user_read,
.write = cifs_user_write,
@@ -668,7 +675,7 @@ const struct file_operations cifs_file_nobrl_ops = {
};
const struct file_operations cifs_file_direct_nobrl_ops = {
- /* no mmap, no aio, no readv -
+ /* no mmap, no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
.read = cifs_user_read,
.write = cifs_user_write,
@@ -693,11 +700,11 @@ const struct file_operations cifs_dir_ops = {
#ifdef CONFIG_CIFS_EXPERIMENTAL
.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
- .ioctl = cifs_ioctl,
+ .ioctl = cifs_ioctl,
};
static void
-cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
struct cifsInodeInfo *cifsi = inode;
@@ -712,7 +719,7 @@ cifs_init_inodecache(void)
sizeof (struct cifsInodeInfo),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- cifs_init_once, NULL);
+ cifs_init_once);
if (cifs_inode_cachep == NULL)
return -ENOMEM;
@@ -741,7 +748,7 @@ cifs_init_request_bufs(void)
cifs_req_cachep = kmem_cache_create("cifs_request",
CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_req_cachep == NULL)
return -ENOMEM;
@@ -749,7 +756,7 @@ cifs_init_request_bufs(void)
cifs_min_rcv = 1;
else if (cifs_min_rcv > 64) {
cifs_min_rcv = 64;
- cERROR(1,("cifs_min_rcv set to maximum (64)"));
+ cERROR(1, ("cifs_min_rcv set to maximum (64)"));
}
cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
@@ -762,25 +769,25 @@ cifs_init_request_bufs(void)
/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
almost all handle based requests (but not write response, nor is it
sufficient for path based requests). A smaller size would have
- been more efficient (compacting multiple slab items on one 4k page)
+ been more efficient (compacting multiple slab items on one 4k page)
for the case in which debug was on, but this larger size allows
more SMBs to use small buffer alloc and is still much more
- efficient to alloc 1 per page off the slab compared to 17K (5page)
+ efficient to alloc 1 per page off the slab compared to 17K (5page)
alloc of large cifs buffers even when page debugging is on */
cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+ NULL);
if (cifs_sm_req_cachep == NULL) {
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep);
- return -ENOMEM;
+ return -ENOMEM;
}
if (cifs_min_small < 2)
cifs_min_small = 2;
else if (cifs_min_small > 256) {
cifs_min_small = 256;
- cFYI(1,("cifs_min_small set to maximum (256)"));
+ cFYI(1, ("cifs_min_small set to maximum (256)"));
}
cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
@@ -810,7 +817,7 @@ cifs_init_mids(void)
{
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof (struct mid_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_mid_cachep == NULL)
return -ENOMEM;
@@ -823,7 +830,7 @@ cifs_init_mids(void)
cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
sizeof (struct oplock_q_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (cifs_oplock_cachep == NULL) {
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
@@ -841,42 +848,43 @@ cifs_destroy_mids(void)
kmem_cache_destroy(cifs_oplock_cachep);
}
-static int cifs_oplock_thread(void * dummyarg)
+static int cifs_oplock_thread(void *dummyarg)
{
- struct oplock_q_entry * oplock_item;
+ struct oplock_q_entry *oplock_item;
struct cifsTconInfo *pTcon;
- struct inode * inode;
+ struct inode *inode;
__u16 netfid;
int rc;
set_freezable();
do {
- if (try_to_freeze())
+ if (try_to_freeze())
continue;
-
+
spin_lock(&GlobalMid_Lock);
if (list_empty(&GlobalOplock_Q)) {
spin_unlock(&GlobalMid_Lock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(39*HZ);
} else {
- oplock_item = list_entry(GlobalOplock_Q.next,
+ oplock_item = list_entry(GlobalOplock_Q.next,
struct oplock_q_entry, qhead);
if (oplock_item) {
- cFYI(1,("found oplock item to write out"));
+ cFYI(1, ("found oplock item to write out"));
pTcon = oplock_item->tcon;
inode = oplock_item->pinode;
netfid = oplock_item->netfid;
spin_unlock(&GlobalMid_Lock);
DeleteOplockQEntry(oplock_item);
/* can not grab inode sem here since it would
- deadlock when oplock received on delete
+ deadlock when oplock received on delete
since vfs_unlink holds the i_mutex across
the call */
/* mutex_lock(&inode->i_mutex);*/
if (S_ISREG(inode->i_mode)) {
rc = filemap_fdatawrite(inode->i_mapping);
- if (CIFS_I(inode)->clientCanCacheRead == 0) {
+ if (CIFS_I(inode)->clientCanCacheRead
+ == 0) {
filemap_fdatawait(inode->i_mapping);
invalidate_remote_inode(inode);
}
@@ -885,20 +893,22 @@ static int cifs_oplock_thread(void * dummyarg)
/* mutex_unlock(&inode->i_mutex);*/
if (rc)
CIFS_I(inode)->write_behind_rc = rc;
- cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
-
- /* releasing a stale oplock after recent reconnection
- of smb session using a now incorrect file
- handle is not a data integrity issue but do
- not bother sending an oplock release if session
- to server still is disconnected since oplock
+ cFYI(1, ("Oplock flush inode %p rc %d",
+ inode, rc));
+
+ /* releasing stale oplock after recent reconnect
+ of smb session using a now incorrect file
+ handle is not a data integrity issue but do
+ not bother sending an oplock release if session
+ to server still is disconnected since oplock
already released by the server in that case */
if (pTcon->tidStatus != CifsNeedReconnect) {
rc = CIFSSMBLock(0, pTcon, netfid,
- 0 /* len */ , 0 /* offset */, 0,
+ 0 /* len */ , 0 /* offset */, 0,
0, LOCKING_ANDX_OPLOCK_RELEASE,
0 /* wait flag */);
- cFYI(1,("Oplock release rc = %d ",rc));
+ cFYI(1,
+ ("Oplock release rc = %d ", rc));
}
} else
spin_unlock(&GlobalMid_Lock);
@@ -910,7 +920,7 @@ static int cifs_oplock_thread(void * dummyarg)
return 0;
}
-static int cifs_dnotify_thread(void * dummyarg)
+static int cifs_dnotify_thread(void *dummyarg)
{
struct list_head *tmp;
struct cifsSesInfo *ses;
@@ -925,9 +935,9 @@ static int cifs_dnotify_thread(void * dummyarg)
to be woken up and wakeq so the
thread can wake up and error out */
list_for_each(tmp, &GlobalSMBSessionList) {
- ses = list_entry(tmp, struct cifsSesInfo,
+ ses = list_entry(tmp, struct cifsSesInfo,
cifsSessionList);
- if (ses && ses->server &&
+ if (ses && ses->server &&
atomic_read(&ses->server->inFlight))
wake_up_all(&ses->server->response_q);
}
@@ -951,13 +961,13 @@ init_cifs(void)
#ifdef CONFIG_CIFS_EXPERIMENTAL
INIT_LIST_HEAD(&GlobalDnotifyReqList);
INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
-#endif
+#endif
/*
* Initialize Global counters
*/
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
- atomic_set(&tcpSesAllocCount,0);
+ atomic_set(&tcpSesAllocCount, 0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
@@ -978,10 +988,10 @@ init_cifs(void)
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
- cFYI(1,("cifs_max_pending set to min of 2"));
+ cFYI(1, ("cifs_max_pending set to min of 2"));
} else if (cifs_max_pending > 256) {
cifs_max_pending = 256;
- cFYI(1,("cifs_max_pending set to max of 256"));
+ cFYI(1, ("cifs_max_pending set to max of 256"));
}
rc = cifs_init_inodecache();
@@ -1003,14 +1013,14 @@ init_cifs(void)
oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
if (IS_ERR(oplockThread)) {
rc = PTR_ERR(oplockThread);
- cERROR(1,("error %d create oplock thread", rc));
+ cERROR(1, ("error %d create oplock thread", rc));
goto out_unregister_filesystem;
}
dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
if (IS_ERR(dnotifyThread)) {
rc = PTR_ERR(dnotifyThread);
- cERROR(1,("error %d create dnotify thread", rc));
+ cERROR(1, ("error %d create dnotify thread", rc));
goto out_stop_oplock_thread;
}
@@ -1036,7 +1046,7 @@ init_cifs(void)
static void __exit
exit_cifs(void)
{
- cFYI(0, ("In unregister ie exit_cifs"));
+ cFYI(0, ("exit_cifs"));
#ifdef CONFIG_PROC_FS
cifs_proc_clean();
#endif
@@ -1049,9 +1059,10 @@ exit_cifs(void)
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
-MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
+MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
- ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
+ ("VFS to access servers complying with the SNIA CIFS Specification "
+ "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)
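
The cifsfs.c hunks above are largely whitespace cleanup, but they also track a slab-allocator API change: the trailing destructor argument in the old kmem_cache_create() calls is gone, leaving only the constructor. The init paths keep the existing pattern of backing each request cache with a mempool and unwinding in reverse order on failure. Below is a minimal sketch of that pattern using only the calls visible in the hunks; the demo_* names and the buffer size are illustrative, not the CIFS ones.

/* Sketch only: mirrors the slab + mempool setup/teardown pattern above.
 * Assumes the constructor-only kmem_cache_create() signature shown in
 * this patch; demo_* identifiers are hypothetical. */
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/errno.h>

static struct kmem_cache *demo_cachep;
static mempool_t *demo_poolp;

static int demo_init_bufs(unsigned int min_items)
{
	demo_cachep = kmem_cache_create("demo_request",
					4096 /* illustrative size */, 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (demo_cachep == NULL)
		return -ENOMEM;

	/* reserve min_items objects so allocation can always make progress */
	demo_poolp = mempool_create_slab_pool(min_items, demo_cachep);
	if (demo_poolp == NULL) {
		kmem_cache_destroy(demo_cachep);	/* unwind in reverse */
		return -ENOMEM;
	}
	return 0;
}

static void demo_destroy_bufs(void)
{
	mempool_destroy(demo_poolp);
	kmem_cache_destroy(demo_cachep);
}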
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c235d32..a20de77 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSFS_H
@@ -43,9 +43,9 @@ extern void cifs_read_inode(struct inode *);
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
-extern int cifs_create(struct inode *, struct dentry *, int,
+extern int cifs_create(struct inode *, struct dentry *, int,
struct nameidata *);
-extern struct dentry * cifs_lookup(struct inode *, struct dentry *,
+extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
struct nameidata *);
extern int cifs_unlink(struct inode *, struct dentry *);
extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
@@ -63,16 +63,16 @@ extern const struct inode_operations cifs_symlink_inode_ops;
/* Functions related to files and directories */
extern const struct file_operations cifs_file_ops;
-extern const struct file_operations cifs_file_direct_ops; /* if directio mount */
+extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */
+extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
extern int cifs_open(struct inode *inode, struct file *file);
extern int cifs_close(struct inode *inode, struct file *file);
extern int cifs_closedir(struct inode *inode, struct file *file);
extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
- size_t read_size, loff_t * poffset);
+ size_t read_size, loff_t *poffset);
extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
- size_t write_size, loff_t * poffset);
+ size_t write_size, loff_t *poffset);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, struct dentry *, int);
extern int cifs_flush(struct file *, fl_owner_t id);
@@ -88,8 +88,9 @@ extern struct dentry_operations cifs_ci_dentry_ops;
/* Functions related to symlinks */
extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *);
-extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
+extern void cifs_put_link(struct dentry *direntry,
+ struct nameidata *nd, void *);
+extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
int buflen);
extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
const char *symname);
@@ -98,7 +99,7 @@ extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
-extern int cifs_ioctl (struct inode * inode, struct file * filep,
+extern int cifs_ioctl (struct inode *inode, struct file *filep,
unsigned int command, unsigned long arg);
-#define CIFS_VERSION "1.49"
+#define CIFS_VERSION "1.50"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 23655de..b98742f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsglob.h
*
- * Copyright (C) International Business Machines Corp., 2002,2006
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
@@ -14,7 +14,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
- *
+ *
*/
#include <linux/in.h>
#include <linux/in6.h>
@@ -28,7 +28,7 @@
#define MAX_TREE_SIZE 2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1
#define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 64 /* used to be 20 - this should still be enough */
+#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
termination then *2 for unicode versions */
#define MAX_PASSWORD_SIZE 16
@@ -38,13 +38,13 @@
/*
* MAX_REQ is the maximum number of requests that WE will send
 * on one socket concurrently. It also matches the most common
- * value of max multiplex returned by servers. We may
+ * value of max multiplex returned by servers. We may
* eventually want to use the negotiated value (in case
* future servers can handle more) when we are more confident that
 * we will not have problems overloading the socket with pending
* write data.
*/
-#define CIFS_MAX_REQ 50
+#define CIFS_MAX_REQ 50
#define SERVER_NAME_LENGTH 15
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
@@ -104,6 +104,17 @@ enum protocolEnum {
/* Netbios frames protocol not supported at this time */
};
+struct mac_key {
+ unsigned int len;
+ union {
+ char ntlm[CIFS_SESS_KEY_SIZE + 16];
+ struct {
+ char key[16];
+ struct ntlmv2_resp resp;
+ } ntlmv2;
+ } data;
+};
+
/*
*****************************************************************
* Except the CIFS PDUs themselves all the
@@ -120,13 +131,13 @@ struct TCP_Server_Info {
struct sockaddr_in sockAddr;
struct sockaddr_in6 sockAddr6;
} addr;
- wait_queue_head_t response_q;
+ wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
struct list_head pending_mid_q;
void *Server_NlsInfo; /* BB - placeholder for future NLS info */
unsigned short server_codepage; /* codepage for the server */
unsigned long ip_address; /* IP addr for the server if known */
- enum protocolEnum protocolType;
+ enum protocolEnum protocolType;
char versionMajor;
char versionMinor;
unsigned svlocal:1; /* local server or remote */
@@ -159,14 +170,15 @@ struct TCP_Server_Info {
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* needed for CIFS PDU signature */
- char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
+ struct mac_key mac_signing_key;
+ char ntlmv2_hash[16];
unsigned long lstrp; /* when we got last response from this server */
};
/*
* The following is our shortcut to user information. We surface the uid,
* and name. We always get the password on the fly in case it
- * has changed. We also hang a list of sessions owned by this user off here.
+ * has changed. We also hang a list of sessions owned by this user off here.
*/
struct cifsUidInfo {
struct list_head userList;
@@ -197,11 +209,11 @@ struct cifsSesInfo {
int Suid; /* remote smb uid */
uid_t linux_uid; /* local Linux uid */
int capabilities;
- char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
+ char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
char userName[MAX_USERNAME_SIZE + 1];
- char * domainName;
- char * password;
+ char *domainName;
+ char *password;
};
/* no more than one of the following three session flags may be set */
#define CIFS_SES_NT4 1
@@ -213,7 +225,7 @@ struct cifsSesInfo {
#define CIFS_SES_LANMAN 8
/*
* there is one of these for each connection to a resource on a particular
- * session
+ * session
*/
struct cifsTconInfo {
struct list_head cifsConnectionList;
@@ -269,7 +281,9 @@ struct cifsTconInfo {
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
unsigned retry:1;
unsigned nocase:1;
- /* BB add field for back pointer to sb struct? */
+ unsigned unix_ext:1; /* if off disable Linux extensions to CIFS protocol
+ for this mount even if server would support */
+ /* BB add field for back pointer to sb struct(s)? */
};
/*
@@ -291,9 +305,9 @@ struct cifs_search_info {
__u16 entries_in_buffer;
__u16 info_level;
__u32 resume_key;
- char * ntwrk_buf_start;
- char * srch_entries_start;
- char * presume_name;
+ char *ntwrk_buf_start;
+ char *srch_entries_start;
+ char *presume_name;
unsigned int resume_name_len;
unsigned endOfSearch:1;
unsigned emptyDir:1;
@@ -309,15 +323,15 @@ struct cifsFileInfo {
__u16 netfid; /* file id from remote */
/* BB add lock scope info here if needed */ ;
/* lock scope id (0 if none) */
- struct file * pfile; /* needed for writepage */
- struct inode * pInode; /* needed for oplock break */
+ struct file *pfile; /* needed for writepage */
+ struct inode *pInode; /* needed for oplock break */
struct mutex lock_mutex;
struct list_head llist; /* list of byte range locks we have. */
unsigned closePend:1; /* file is marked to close */
unsigned invalidHandle:1; /* file closed via session abend */
atomic_t wrtPending; /* handle in use - defer close */
struct semaphore fh_sem; /* prevents reopen race after dead ses*/
- char * search_resume_name; /* BB removeme BB */
+ char *search_resume_name; /* BB removeme BB */
struct cifs_search_info srch_inf;
};
@@ -327,7 +341,7 @@ struct cifsFileInfo {
struct cifsInodeInfo {
struct list_head lockList;
- /* BB add in lists for dirty pages - i.e. write caching info for oplock */
+ /* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
int write_behind_rc;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -381,9 +395,9 @@ static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
}
#else
-#define cifs_stats_inc(field) do {} while(0)
-#define cifs_stats_bytes_written(tcon, bytes) do {} while(0)
-#define cifs_stats_bytes_read(tcon, bytes) do {} while(0)
+#define cifs_stats_inc(field) do {} while (0)
+#define cifs_stats_bytes_written(tcon, bytes) do {} while (0)
+#define cifs_stats_bytes_read(tcon, bytes) do {} while (0)
#endif
@@ -410,8 +424,8 @@ struct mid_q_entry {
struct oplock_q_entry {
struct list_head qhead;
- struct inode * pinode;
- struct cifsTconInfo * tcon;
+ struct inode *pinode;
+ struct cifsTconInfo *tcon;
__u16 netfid;
};
@@ -426,7 +440,7 @@ struct dir_notify_req {
__u16 netfid;
__u32 filter; /* CompletionFilter (for multishot) */
int multishot;
- struct file * pfile;
+ struct file *pfile;
};
#define MID_FREE 0
@@ -464,7 +478,7 @@ require use of the stronger protocol */
#define CIFSSEC_MUST_LANMAN 0x10010
#define CIFSSEC_MUST_PLNTXT 0x20020
#define CIFSSEC_MASK 0x37037 /* current flags supported if weak */
-#else
+#else
#define CIFSSEC_MASK 0x07007 /* flags supported if no weak config */
#endif /* WEAK_PW_HASH */
#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
@@ -502,7 +516,7 @@ require use of the stronger protocol */
* ----------
* sesSem operations on smb session
* tconSem operations on tree connection
- * fh_sem file handle reconnection operations
+ * fh_sem file handle reconnection operations
*
****************************************************************************/
@@ -515,7 +529,7 @@ require use of the stronger protocol */
/*
* The list of servers that did not respond with NT LM 0.12.
* This list helps improve performance and eliminate the messages indicating
- * that we had a communications error talking to the server in this list.
+ * that we had a communications error talking to the server in this list.
*/
/* Feature not supported */
/* GLOBAL_EXTERN struct servers_not_supported *NotSuppList; */
@@ -568,12 +582,12 @@ GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
to be established on existing mount if we
- have the uid/password or Kerberos credential
+ have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
-GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
+GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
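
The most substantive cifsglob.h change above is the new struct mac_key: instead of a flat mac_signing_key byte array, TCP_Server_Info now carries a union that can hold either the NTLM session key plus response or an NTLMv2 key together with its ntlmv2_resp, with len recording how much of the union is currently valid. A reduced, hedged sketch of that layout and of filling its NTLM arm follows; the demo_* names, the stand-in ntlmv2_resp size, and the assumption that CIFS_SESS_KEY_SIZE is 16 are illustrative only.

/* Illustrative copy of the mac_key shape added above; the real
 * definitions live in the CIFS headers.  Session key size assumed 16. */
#include <string.h>

#define DEMO_SESS_KEY_SIZE 16

struct demo_ntlmv2_resp { unsigned char blob[48]; };	/* stand-in */

struct demo_mac_key {
	unsigned int len;
	union {
		char ntlm[DEMO_SESS_KEY_SIZE + 16];
		struct {
			char key[16];
			struct demo_ntlmv2_resp resp;
		} ntlmv2;
	} data;
};

/* NTLM case: session key followed by a 16 byte response blob */
static void demo_set_ntlm_key(struct demo_mac_key *mk,
			      const char *sess_key, const char *resp16)
{
	memcpy(mk->data.ntlm, sess_key, DEMO_SESS_KEY_SIZE);
	memcpy(mk->data.ntlm + DEMO_SESS_KEY_SIZE, resp16, 16);
	mk->len = DEMO_SESS_KEY_SIZE + 16;	/* whole ntlm member valid */
}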
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index d619ca7..6a2056e 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -144,7 +144,7 @@
#define SMBOPEN_OAPPEND 0x0001
/*
- * SMB flag definitions
+ * SMB flag definitions
*/
#define SMBFLG_EXTD_LOCK 0x01 /* server supports lock-read write-unlock smb */
#define SMBFLG_RCV_POSTED 0x02 /* obsolete */
@@ -157,9 +157,9 @@
#define SMBFLG_RESPONSE 0x80 /* this PDU is a response from server */
/*
- * SMB flag2 definitions
+ * SMB flag2 definitions
*/
-#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1) /* can send long (non-8.3)
+#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1) /* can send long (non-8.3)
path names in response */
#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
@@ -260,7 +260,7 @@
#define ATTR_SPARSE 0x0200
#define ATTR_REPARSE 0x0400
#define ATTR_COMPRESSED 0x0800
-#define ATTR_OFFLINE 0x1000 /* ie file not immediately available -
+#define ATTR_OFFLINE 0x1000 /* ie file not immediately available -
on offline storage */
#define ATTR_NOT_CONTENT_INDEXED 0x2000
#define ATTR_ENCRYPTED 0x4000
@@ -300,7 +300,7 @@
#define CREATE_DELETE_ON_CLOSE 0x00001000
#define CREATE_OPEN_BY_ID 0x00002000
#define OPEN_REPARSE_POINT 0x00200000
-#define CREATE_OPTIONS_MASK 0x007FFFFF
+#define CREATE_OPTIONS_MASK 0x007FFFFF
#define CREATE_OPTION_SPECIAL 0x20000000 /* system. NB not sent over wire */
/* ImpersonationLevel flags */
@@ -366,17 +366,19 @@ struct smb_hdr {
#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2* smb_var->WordCount) + 2 )
/*
- * Computer Name Length
+ * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
+ * No longer as important, now that TCP names are more commonly used to
+ * resolve hosts.
*/
#define CNLEN 15
/*
- * Share Name Length @S8A
- * Note: This length is limited by the SMB used to get @S8A
- * the Share info. NetShareEnum only returns 13 @S8A
- * chars, including the null termination. @S8A
+ * Share Name Length (SNLEN)
+ * Note: This length was limited by the SMB used to get
+ * the Share info. NetShareEnum only returned 13
+ * chars, including the null termination.
+ * This was removed because it no longer is limiting.
*/
-#define SNLEN 12 /*@S8A */
/*
* Comment Length
@@ -394,8 +396,8 @@ struct smb_hdr {
*
* The Naming convention is the lower case version of the
* smb command code name for the struct and this is typedef to the
- * uppercase version of the same name with the prefix SMB_ removed
- * for brevity. Although typedefs are not commonly used for
+ * uppercase version of the same name with the prefix SMB_ removed
+ * for brevity. Although typedefs are not commonly used for
* structure definitions in the Linux kernel, their use in the
* CIFS standards document, which this code is based on, may
* make this one of the cases where typedefs for structures make
@@ -403,7 +405,7 @@ struct smb_hdr {
* Typedefs can always be removed later if they are too distracting
* and they are only used for the CIFSs PDUs themselves, not
* internal cifs vfs structures
- *
+ *
*/
typedef struct negotiate_req {
@@ -511,7 +513,7 @@ typedef union smb_com_session_setup_andx {
unsigned char SecurityBlob[1]; /* followed by */
/* STRING NativeOS */
/* STRING NativeLanMan */
- } __attribute__((packed)) req; /* NTLM request format (with
+ } __attribute__((packed)) req; /* NTLM request format (with
extended security */
struct { /* request format */
@@ -549,7 +551,7 @@ typedef union smb_com_session_setup_andx {
/* unsigned char * NativeOS; */
/* unsigned char * NativeLanMan; */
/* unsigned char * PrimaryDomain; */
- } __attribute__((packed)) resp; /* NTLM response
+ } __attribute__((packed)) resp; /* NTLM response
(with or without extended sec) */
struct { /* request format */
@@ -618,7 +620,7 @@ struct ntlmv2_resp {
#define CAP_NT_SMBS 0x00000010
#define CAP_STATUS32 0x00000040
#define CAP_LEVEL_II_OPLOCKS 0x00000080
-#define CAP_NT_FIND 0x00000200 /* reserved should be zero
+#define CAP_NT_FIND 0x00000200 /* reserved should be zero
(because NT_SMBs implies the same thing?) */
#define CAP_BULK_TRANSFER 0x20000000
#define CAP_EXTENDED_SECURITY 0x80000000
@@ -676,7 +678,7 @@ typedef struct smb_com_logoff_andx_rsp {
__u16 ByteCount;
} __attribute__((packed)) LOGOFF_ANDX_RSP;
-typedef union smb_com_tree_disconnect { /* as an altetnative can use flag on
+typedef union smb_com_tree_disconnect { /* as an alternative can use flag on
tree_connect PDU to effect disconnect */
/* tdis is probably simplest SMB PDU */
struct {
@@ -712,6 +714,7 @@ typedef struct smb_com_findclose_req {
#define REQ_OPLOCK 0x00000002
#define REQ_BATCHOPLOCK 0x00000004
#define REQ_OPENDIRONLY 0x00000008
+#define REQ_EXTENDED_INFO 0x00000010
typedef struct smb_com_open_req { /* also handles create */
struct smb_hdr hdr; /* wct = 24 */
@@ -799,27 +802,28 @@ typedef struct smb_com_openx_rsp {
__u32 FileId;
__u16 Reserved;
__u16 ByteCount;
-} __attribute__((packed)) OPENX_RSP;
+} __attribute__((packed)) OPENX_RSP;
/* For encoding of POSIX Open Request - see trans2 function 0x209 data struct */
/* Legacy write request for older servers */
typedef struct smb_com_writex_req {
- struct smb_hdr hdr; /* wct = 12 */
- __u8 AndXCommand;
- __u8 AndXReserved;
- __le16 AndXOffset;
- __u16 Fid;
- __le32 OffsetLow;
- __u32 Reserved; /* Timeout */
- __le16 WriteMode; /* 1 = write through */
- __le16 Remaining;
- __le16 Reserved2;
- __le16 DataLengthLow;
- __le16 DataOffset;
- __le16 ByteCount;
- __u8 Pad; /* BB check for whether padded to DWORD boundary and optimum performance here */
- char Data[0];
+ struct smb_hdr hdr; /* wct = 12 */
+ __u8 AndXCommand;
+ __u8 AndXReserved;
+ __le16 AndXOffset;
+ __u16 Fid;
+ __le32 OffsetLow;
+ __u32 Reserved; /* Timeout */
+ __le16 WriteMode; /* 1 = write through */
+ __le16 Remaining;
+ __le16 Reserved2;
+ __le16 DataLengthLow;
+ __le16 DataOffset;
+ __le16 ByteCount;
+ __u8 Pad; /* BB check for whether padded to DWORD
+ boundary and optimum performance here */
+ char Data[0];
} __attribute__((packed)) WRITEX_REQ;
typedef struct smb_com_write_req {
@@ -837,7 +841,8 @@ typedef struct smb_com_write_req {
__le16 DataOffset;
__le32 OffsetHigh;
__le16 ByteCount;
- __u8 Pad; /* BB check for whether padded to DWORD boundary and optimum performance here */
+ __u8 Pad; /* BB check for whether padded to DWORD
+ boundary and optimum performance here */
char Data[0];
} __attribute__((packed)) WRITE_REQ;
@@ -855,17 +860,17 @@ typedef struct smb_com_write_rsp {
/* legacy read request for older servers */
typedef struct smb_com_readx_req {
- struct smb_hdr hdr; /* wct = 10 */
- __u8 AndXCommand;
- __u8 AndXReserved;
- __le16 AndXOffset;
- __u16 Fid;
- __le32 OffsetLow;
- __le16 MaxCount;
- __le16 MinCount; /* obsolete */
- __le32 Reserved;
- __le16 Remaining;
- __le16 ByteCount;
+ struct smb_hdr hdr; /* wct = 10 */
+ __u8 AndXCommand;
+ __u8 AndXReserved;
+ __le16 AndXOffset;
+ __u16 Fid;
+ __le32 OffsetLow;
+ __le16 MaxCount;
+ __le16 MinCount; /* obsolete */
+ __le32 Reserved;
+ __le16 Remaining;
+ __le16 ByteCount;
} __attribute__((packed)) READX_REQ;
typedef struct smb_com_read_req {
@@ -896,7 +901,8 @@ typedef struct smb_com_read_rsp {
__le16 DataLengthHigh;
__u64 Reserved2;
__u16 ByteCount;
- __u8 Pad; /* BB check for whether padded to DWORD boundary and optimum performance here */
+ __u8 Pad; /* BB check for whether padded to DWORD
+ boundary and optimum performance here */
char Data[1];
} __attribute__((packed)) READ_RSP;
@@ -967,7 +973,7 @@ typedef struct smb_com_rename_req {
#define COPY_TARGET_MODE_ASCII 0x0004 /* if not set, binary */
#define COPY_SOURCE_MODE_ASCII 0x0008 /* if not set, binary */
#define COPY_VERIFY_WRITES 0x0010
-#define COPY_TREE 0x0020
+#define COPY_TREE 0x0020
typedef struct smb_com_copy_req {
struct smb_hdr hdr; /* wct = 3 */
@@ -975,7 +981,7 @@ typedef struct smb_com_copy_req {
__le16 OpenFunction;
__le16 Flags;
__le16 ByteCount;
- __u8 BufferFormat; /* 4 = ASCII or Unicode */
+ __u8 BufferFormat; /* 4 = ASCII or Unicode */
unsigned char OldFileName[1];
/* followed by __u8 BufferFormat2 */
/* followed by NewFileName string */
@@ -1083,28 +1089,28 @@ typedef struct smb_com_setattr_rsp {
/*******************************************************/
/* NT Transact structure definitions follow */
-/* Currently only ioctl, acl (get security descriptor) */
+/* Currently only ioctl, acl (get security descriptor) */
/* and notify are implemented */
/*******************************************************/
typedef struct smb_com_ntransact_req {
- struct smb_hdr hdr; /* wct >= 19 */
- __u8 MaxSetupCount;
- __u16 Reserved;
- __le32 TotalParameterCount;
- __le32 TotalDataCount;
- __le32 MaxParameterCount;
- __le32 MaxDataCount;
- __le32 ParameterCount;
- __le32 ParameterOffset;
- __le32 DataCount;
- __le32 DataOffset;
- __u8 SetupCount; /* four setup words follow subcommand */
- /* SNIA spec incorrectly included spurious pad here */
- __le16 SubCommand; /* 2 = IOCTL/FSCTL */
- /* SetupCount words follow then */
- __le16 ByteCount;
- __u8 Pad[3];
- __u8 Parms[0];
+ struct smb_hdr hdr; /* wct >= 19 */
+ __u8 MaxSetupCount;
+ __u16 Reserved;
+ __le32 TotalParameterCount;
+ __le32 TotalDataCount;
+ __le32 MaxParameterCount;
+ __le32 MaxDataCount;
+ __le32 ParameterCount;
+ __le32 ParameterOffset;
+ __le32 DataCount;
+ __le32 DataOffset;
+ __u8 SetupCount; /* four setup words follow subcommand */
+ /* SNIA spec incorrectly included spurious pad here */
+ __le16 SubCommand; /* 2 = IOCTL/FSCTL */
+ /* SetupCount words follow then */
+ __le16 ByteCount;
+ __u8 Pad[3];
+ __u8 Parms[0];
} __attribute__((packed)) NTRANSACT_REQ;
typedef struct smb_com_ntransact_rsp {
@@ -1120,7 +1126,7 @@ typedef struct smb_com_ntransact_rsp {
__le32 DataDisplacement;
__u8 SetupCount; /* 0 */
__u16 ByteCount;
- /* __u8 Pad[3]; */
+ /* __u8 Pad[3]; */
/* parms and data follow */
} __attribute__((packed)) NTRANSACT_RSP;
@@ -1215,7 +1221,7 @@ typedef struct smb_com_transaction_change_notify_req {
/* __u8 Data[1];*/
} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
-/* BB eventually change to use generic ntransact rsp struct
+/* BB eventually change to use generic ntransact rsp struct
and validation routine */
typedef struct smb_com_transaction_change_notify_rsp {
struct smb_hdr hdr; /* wct = 18 */
@@ -1262,7 +1268,7 @@ struct file_notify_information {
__le32 Action;
__le32 FileNameLength;
__u8 FileName[0];
-} __attribute__((packed));
+} __attribute__((packed));
struct reparse_data {
__u32 ReparseTag;
@@ -1331,7 +1337,7 @@ struct trans2_resp {
__u8 Reserved1;
/* SetupWords[SetupCount];
__u16 ByteCount;
- __u16 Reserved2;*/
+ __u16 Reserved2;*/
/* data area follows */
} __attribute__((packed));
@@ -1370,9 +1376,9 @@ struct smb_t2_rsp {
#define SMB_QUERY_FILE_INTERNAL_INFO 0x3ee
#define SMB_QUERY_FILE_ACCESS_INFO 0x3f0
#define SMB_QUERY_FILE_NAME_INFO2 0x3f1 /* 0x30 bytes */
-#define SMB_QUERY_FILE_POSITION_INFO 0x3f6
+#define SMB_QUERY_FILE_POSITION_INFO 0x3f6
#define SMB_QUERY_FILE_MODE_INFO 0x3f8
-#define SMB_QUERY_FILE_ALGN_INFO 0x3f9
+#define SMB_QUERY_FILE_ALGN_INFO 0x3f9
#define SMB_SET_FILE_BASIC_INFO 0x101
@@ -1506,35 +1512,35 @@ struct smb_com_transaction2_sfi_req {
__u16 Pad1;
__u16 Fid;
__le16 InformationLevel;
- __u16 Reserved4;
+ __u16 Reserved4;
} __attribute__((packed));
struct smb_com_transaction2_sfi_rsp {
struct smb_hdr hdr; /* wct = 10 + SetupCount */
struct trans2_resp t2;
__u16 ByteCount;
- __u16 Reserved2; /* parameter word reserved -
+ __u16 Reserved2; /* parameter word reserved -
present for infolevels > 100 */
} __attribute__((packed));
struct smb_t2_qfi_req {
- struct smb_hdr hdr;
- struct trans2_req t2;
+ struct smb_hdr hdr;
+ struct trans2_req t2;
__u8 Pad;
__u16 Fid;
__le16 InformationLevel;
} __attribute__((packed));
struct smb_t2_qfi_rsp {
- struct smb_hdr hdr; /* wct = 10 + SetupCount */
- struct trans2_resp t2;
- __u16 ByteCount;
- __u16 Reserved2; /* parameter word reserved -
- present for infolevels > 100 */
+ struct smb_hdr hdr; /* wct = 10 + SetupCount */
+ struct trans2_resp t2;
+ __u16 ByteCount;
+ __u16 Reserved2; /* parameter word reserved -
+ present for infolevels > 100 */
} __attribute__((packed));
/*
- * Flags on T2 FINDFIRST and FINDNEXT
+ * Flags on T2 FINDFIRST and FINDNEXT
*/
#define CIFS_SEARCH_CLOSE_ALWAYS 0x0001
#define CIFS_SEARCH_CLOSE_AT_END 0x0002
@@ -1743,7 +1749,9 @@ typedef struct smb_com_transaction2_get_dfs_refer_req {
__u8 Reserved3;
__le16 SubCommand; /* one setup word */
__le16 ByteCount;
- __u8 Pad[3]; /* Win2K has sent 0x0F01 (max resp length perhaps?) followed by one byte pad - doesn't seem to matter though */
+ __u8 Pad[3]; /* Win2K has sent 0x0F01 (max response length
+ perhaps?) followed by one byte pad - doesn't
+ seem to matter though */
__le16 MaxReferralLevel;
char RequestFileName[1];
} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
@@ -1752,7 +1760,10 @@ typedef struct dfs_referral_level_3 {
__le16 VersionNumber;
__le16 ReferralSize;
__le16 ServerType; /* 0x0001 = CIFS server */
- __le16 ReferralFlags; /* or proximity - not clear which since always set to zero - SNIA spec says 0x01 means strip off PathConsumed chars before submitting RequestFileName to remote node */
+ __le16 ReferralFlags; /* or proximity - not clear which since it is
+ always set to zero - SNIA spec says 0x01
+ means strip off PathConsumed chars before
+ submitting RequestFileName to remote node */
__le16 TimeToLive;
__le16 Proximity;
__le16 DfsPathOffset;
@@ -1778,11 +1789,13 @@ typedef struct smb_com_transaction_get_dfs_refer_rsp {
#define DFSREF_STORAGE_SERVER 0x0002
/* IOCTL information */
-/* List of ioctl function codes that look to be of interest to remote clients like this. */
-/* Need to do some experimentation to make sure they all work remotely. */
-/* Some of the following such as the encryption/compression ones would be */
-/* invoked from tools via a specialized hook into the VFS rather than via the */
-/* standard vfs entry points */
+/*
+ * List of ioctl function codes that look to be of interest to remote clients
+ * like this one. Need to do some experimentation to make sure they all work
+ * remotely. Some of the following, such as the encryption/compression ones
+ * would be invoked from tools via a specialized hook into the VFS rather
+ * than via the standard vfs entry points
+ */
#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
#define FSCTL_REQUEST_BATCH_OPLOCK 0x00090008
@@ -1811,7 +1824,7 @@ typedef struct smb_com_transaction_get_dfs_refer_rsp {
/*
************************************************************************
* All structs for everything above the SMB PDUs themselves
- * (such as the T2 level specific data) go here
+ * (such as the T2 level specific data) go here
************************************************************************
*/
@@ -1857,7 +1870,7 @@ typedef struct {
__le64 FreeAllocationUnits;
__le32 SectorsPerAllocationUnit;
__le32 BytesPerSector;
-} __attribute__((packed)) FILE_SYSTEM_INFO; /* size info, level 0x103 */
+} __attribute__((packed)) FILE_SYSTEM_INFO; /* size info, level 0x103 */
typedef struct {
__le32 fsid;
@@ -1871,7 +1884,7 @@ typedef struct {
__le16 MajorVersionNumber;
__le16 MinorVersionNumber;
__le64 Capability;
-} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO; /* Unix extensions info, level 0x200 */
+} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO; /* Unix extension level 0x200*/
/* Version numbers for CIFS UNIX major and minor. */
#define CIFS_UNIX_MAJOR_VERSION 1
@@ -1885,16 +1898,20 @@ typedef struct {
#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Allow POSIX path chars */
#define CIFS_UNIX_POSIX_PATH_OPS_CAP 0x00000020 /* Allow new POSIX path based
calls including posix open
- and posix unlink */
+ and posix unlink */
+#define CIFS_UNIX_LARGE_READ_CAP 0x00000040 /* support reads >128K (up
+ to 0xFFFF00 */
+#define CIFS_UNIX_LARGE_WRITE_CAP 0x00000080
+
#ifdef CONFIG_CIFS_POSIX
/* Can not set pathnames cap yet until we send new posix create SMB since
otherwise server can treat such handles opened with older ntcreatex
(by a new client which knows how to send posix path ops)
as non-posix handles (can affect write behavior with byte range locks.
We can add back in POSIX_PATH_OPS cap when Posix Create/Mkdir finished */
-/* #define CIFS_UNIX_CAP_MASK 0x0000003b */
-#define CIFS_UNIX_CAP_MASK 0x0000001b
-#else
+/* #define CIFS_UNIX_CAP_MASK 0x000000fb */
+#define CIFS_UNIX_CAP_MASK 0x000000db
+#else
#define CIFS_UNIX_CAP_MASK 0x00000013
#endif /* CONFIG_CIFS_POSIX */
@@ -1904,10 +1921,10 @@ typedef struct {
typedef struct {
/* For undefined recommended transfer size return -1 in that field */
__le32 OptimalTransferSize; /* bsize on some os, iosize on other os */
- __le32 BlockSize;
+ __le32 BlockSize;
/* The next three fields are in terms of the block size.
(above). If block size is unknown, 4096 would be a
- reasonable block size for a server to report.
+ reasonable block size for a server to report.
Note that returning the blocks/blocksavail removes need
to make a second call (to QFSInfo level 0x103 to get this info.
UserBlockAvail is typically less than or equal to BlocksAvail,
@@ -2062,9 +2079,9 @@ struct file_alt_name_info {
struct file_stream_info {
__le32 number_of_streams; /* BB check sizes and verify location */
- /* followed by info on streams themselves
+ /* followed by info on streams themselves
u64 size;
- u64 allocation_size
+ u64 allocation_size
stream info */
}; /* level 0x109 */
@@ -2083,7 +2100,7 @@ struct cifs_posix_ace { /* access control entry (ACE) */
__u8 cifs_e_tag;
__u8 cifs_e_perm;
__le64 cifs_uid; /* or gid */
-} __attribute__((packed));
+} __attribute__((packed));
struct cifs_posix_acl { /* access control list (ACL) */
__le16 version;
@@ -2138,6 +2155,12 @@ typedef struct {
/* struct following varies based on requested level */
} __attribute__((packed)) OPEN_PSX_RSP; /* level 0x209 SetPathInfo data */
+#define SMB_POSIX_UNLINK_FILE_TARGET 0
+#define SMB_POSIX_UNLINK_DIRECTORY_TARGET 1
+
+struct unlink_psx_rq { /* level 0x20a SetPathInfo */
+ __le16 type;
+} __attribute__((packed));
struct file_internal_info {
__u64 UniqueId; /* inode number */
@@ -2154,7 +2177,7 @@ struct file_attrib_tag {
/********************************************************/
-/* FindFirst/FindNext transact2 data buffer formats */
+/* FindFirst/FindNext transact2 data buffer formats */
/********************************************************/
typedef struct {
@@ -2232,7 +2255,7 @@ typedef struct {
__le64 EndOfFile;
__le64 AllocationSize;
__le32 ExtFileAttributes;
- __le32 FileNameLength;
+ __le32 FileNameLength;
__le32 EaSize; /* length of the xattrs */
__u8 ShortNameLength;
__u8 Reserved;
@@ -2259,7 +2282,7 @@ typedef struct {
struct win_dev {
unsigned char type[8]; /* IntxCHR or IntxBLK */
__le64 major;
- __le64 minor;
+ __le64 minor;
} __attribute__((packed));
struct gea {
@@ -2291,36 +2314,36 @@ struct fealist {
struct data_blob {
__u8 *data;
size_t length;
- void (*free) (struct data_blob * data_blob);
+ void (*free) (struct data_blob *data_blob);
} __attribute__((packed));
#ifdef CONFIG_CIFS_POSIX
-/*
+/*
For better POSIX semantics from Linux client, (even better
than the existing CIFS Unix Extensions) we need updated PDUs for:
-
+
1) PosixCreateX - to set and return the mode, inode#, device info and
perhaps add a CreateDevice - to create Pipes and other special .inodes
Also note POSIX open flags
- 2) Close - to return the last write time to do cache across close
+ 2) Close - to return the last write time to do cache across close
more safely
- 3) FindFirst return unique inode number - what about resume key, two
+ 3) FindFirst return unique inode number - what about resume key, two
forms short (matches readdir) and full (enough info to cache inodes)
4) Mkdir - set mode
-
- And under consideration:
+
+ And under consideration:
5) FindClose2 (return nanosecond timestamp ??)
- 6) Use nanosecond timestamps throughout all time fields if
+ 6) Use nanosecond timestamps throughout all time fields if
corresponding attribute flag is set
7) sendfile - handle based copy
8) Direct i/o
9) Misc fcntls?
-
+
what about fixing 64 bit alignment
-
+
There are also various legacy SMB/CIFS requests used as is
-
+
From existing Lanman and NTLM dialects:
--------------------------------------
NEGOTIATE
@@ -2341,48 +2364,48 @@ struct data_blob {
(BB verify that never need to set allocation size)
SMB_SET_FILE_BASIC_INFO2 (setting times - BB can it be done via
Unix ext?)
-
+
COPY (note support for copy across directories) - FUTURE, OPTIONAL
setting/getting OS/2 EAs - FUTURE (BB can this handle
setting Linux xattrs perfectly) - OPTIONAL
dnotify - FUTURE, OPTIONAL
quota - FUTURE, OPTIONAL
-
- Note that various requests implemented for NT interop such as
+
+ Note that various requests implemented for NT interop such as
NT_TRANSACT (IOCTL) QueryReparseInfo
are unneeded to servers compliant with the CIFS POSIX extensions
-
+
From CIFS Unix Extensions:
-------------------------
T2 SET_PATH_INFO (SMB_SET_FILE_UNIX_LINK) for symlinks
T2 SET_PATH_INFO (SMB_SET_FILE_BASIC_INFO2)
T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_LINK)
- T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC) - BB check for missing inode fields
- Actually need QUERY_FILE_UNIX_INFO since has inode num
- BB what about a) blksize/blkbits/blocks
+ T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC) BB check for missing
+ inode fields
+	Actually need QUERY_FILE_UNIX_INFO
+	since it has the inode num
+ BB what about a) blksize/blkbits/blocks
b) i_version
c) i_rdev
d) notify mask?
e) generation
f) size_seqcount
T2 FIND_FIRST/FIND_NEXT FIND_FILE_UNIX
- TRANS2_GET_DFS_REFERRAL - OPTIONAL but recommended
+ TRANS2_GET_DFS_REFERRAL - OPTIONAL but recommended
T2_QFS_INFO QueryDevice/AttributeInfo - OPTIONAL
-
-
*/
/* xsymlink is a symlink format (used by MacOS) that can be used
- to save symlink info in a regular file when
+ to save symlink info in a regular file when
mounted to operating systems that do not
support the cifs Unix extensions or EAs (for xattr
based symlinks). For such a file to be recognized
- as containing symlink data:
+ as containing symlink data:
- 1) file size must be 1067,
+ 1) file size must be 1067,
2) signature must begin file data,
3) length field must be set to ASCII representation
- of a number which is less than or equal to 1024,
+ of a number which is less than or equal to 1024,
4) md5 must match that of the path data */
struct xsymlink {
@@ -2393,10 +2416,10 @@ struct xsymlink {
char length[4];
char cr1; /* \n */
/* md5 of valid subset of path ie path[0] through path[length-1] */
- __u8 md5[32];
+ __u8 md5[32];
char cr2; /* \n */
/* if room left, then end with \n then 0x20s by convention but not required */
- char path[1024];
+ char path[1024];
} __attribute__((packed));
typedef struct file_xattr_info {
@@ -2405,7 +2428,8 @@ typedef struct file_xattr_info {
__u32 xattr_value_len;
char xattr_name[0];
/* followed by xattr_value[xattr_value_len], no pad */
-} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute, info level 0x205 */
+} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
+ level 0x205 */
/* flags for chattr command */
@@ -2431,8 +2455,9 @@ typedef struct file_xattr_info {
typedef struct file_chattr_info {
__le64 mask; /* list of all possible attribute bits */
__le64 mode; /* list of actual attribute bits on this inode */
-} __attribute__((packed)) FILE_CHATTR_INFO; /* ext attributes (chattr, chflags) level 0x206 */
+} __attribute__((packed)) FILE_CHATTR_INFO; /* ext attributes
+ (chattr, chflags) level 0x206 */
-#endif
+#endif
#endif /* _CIFSPDU_H */
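
One cifspdu.h change worth calling out: with CONFIG_CIFS_POSIX, CIFS_UNIX_CAP_MASK grows from 0x1b to 0xdb, which is exactly the old mask ORed with the two capabilities added above (CIFS_UNIX_LARGE_READ_CAP 0x40 and CIFS_UNIX_LARGE_WRITE_CAP 0x80), while POSIX_PATH_OPS (0x20) stays masked out until POSIX create/mkdir is finished, as the comment explains. A small worked check of that bit arithmetic follows; the low bits (0x01, 0x02, 0x08) are not named in this hunk, so they are treated as opaque here.

/* Worked bit arithmetic for the mask bump above (illustrative). */
#include <assert.h>

#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010
#define CIFS_UNIX_POSIX_PATH_OPS_CAP  0x00000020
#define CIFS_UNIX_LARGE_READ_CAP      0x00000040
#define CIFS_UNIX_LARGE_WRITE_CAP     0x00000080
#define OLD_CAP_MASK                  0x0000001b	/* value before this patch */
#define NEW_CAP_MASK                  0x000000db	/* value after this patch */

int main(void)
{
	/* new mask == old mask plus the two large read/write bits */
	assert((OLD_CAP_MASK | CIFS_UNIX_LARGE_READ_CAP |
		CIFS_UNIX_LARGE_WRITE_CAP) == NEW_CAP_MASK);
	/* posix path ops still deliberately excluded ... */
	assert((NEW_CAP_MASK & CIFS_UNIX_POSIX_PATH_OPS_CAP) == 0);
	/* ... while posix pathnames remain advertised */
	assert(NEW_CAP_MASK & CIFS_UNIX_POSIX_PATHNAMES_CAP);
	return 0;
}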
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5d163e2..04a69da 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSPROTO_H
#define _CIFSPROTO_H
@@ -49,9 +49,9 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int long_op);
extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
- struct kvec *, int /* nvec to send */,
+ struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int long_op);
-extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
+extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
struct cifsTconInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
@@ -64,19 +64,19 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
enum securityEnum *secType);
-extern int cifs_inet_pton(int, char * source, void *dst);
+extern int cifs_inet_pton(int, char *source, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
fixed section (word count) in two byte units */);
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
- void ** request_buf);
+ void **request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
- const int stage,
+ const int stage,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
+extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
extern void DeleteOplockQEntry(struct oplock_q_entry *);
extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
@@ -85,12 +85,12 @@ extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
extern int cifs_get_inode_info(struct inode **pinode,
- const unsigned char *search_path,
+ const unsigned char *search_path,
FILE_ALL_INFO * pfile_info,
struct super_block *sb, int xid);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
- struct super_block *sb,int xid);
+ struct super_block *sb, int xid);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
const char *);
@@ -98,8 +98,8 @@ extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
void cifs_proc_init(void);
void cifs_proc_clean(void);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
- struct nls_table * nls_info);
+extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+ struct nls_table *nls_info);
extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
@@ -108,11 +108,11 @@ extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
const char *searchName, const struct nls_table *nls_codepage,
- __u16 *searchHandle, struct cifs_search_info * psrch_inf,
+ __u16 *searchHandle, struct cifs_search_info *psrch_inf,
int map, const char dirsep);
extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
- __u16 searchHandle, struct cifs_search_info * psrch_inf);
+ __u16 searchHandle, struct cifs_search_info *psrch_inf);
extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
const __u16 search_handle);
@@ -123,9 +123,9 @@ extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
int legacy /* whether to use old info level */,
const struct nls_table *nls_codepage, int remap);
extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO * findData,
- const struct nls_table *nls_codepage, int remap);
+ const unsigned char *searchName,
+ FILE_ALL_INFO *findData,
+ const struct nls_table *nls_codepage, int remap);
extern int CIFSSMBUnixQPathInfo(const int xid,
struct cifsTconInfo *tcon,
@@ -143,13 +143,13 @@ extern int connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
const char *old_path,
const struct nls_table *nls_codepage, int remap);
extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
- const char *old_path,
+ const char *old_path,
const struct nls_table *nls_codepage,
- unsigned int *pnum_referrals,
- unsigned char ** preferrals,
+ unsigned int *pnum_referrals,
+ unsigned char **preferrals,
int remap);
extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
- struct super_block * sb, struct smb_vol * vol);
+ struct super_block *sb, struct smb_vol *vol);
extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
struct kstatfs *FSData);
extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon,
@@ -181,11 +181,11 @@ extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
- __u64 size, __u16 fileHandle,__u32 opener_pid,
+ __u64 size, __u16 fileHandle, __u32 opener_pid,
int AllocSizeFlag);
extern int CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *pTcon,
char *full_path, __u64 mode, __u64 uid,
- __u64 gid, dev_t dev,
+ __u64 gid, dev_t dev,
const struct nls_table *nls_codepage,
int remap_special_chars);
@@ -196,7 +196,10 @@ extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
const char *name, const struct nls_table *nls_codepage,
int remap_special_chars);
-
+extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon,
+ const char *name, __u16 type,
+ const struct nls_table *nls_codepage,
+ int remap_special_chars);
extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
const char *name,
const struct nls_table *nls_codepage,
@@ -205,8 +208,8 @@ extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
- int netfid, char * target_name,
+extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+ int netfid, char *target_name,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSCreateHardLink(const int xid,
@@ -217,7 +220,7 @@ extern int CIFSCreateHardLink(const int xid,
extern int CIFSUnixCreateHardLink(const int xid,
struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
- const struct nls_table *nls_codepage,
+ const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSUnixCreateSymLink(const int xid,
struct cifsTconInfo *tcon,
@@ -228,7 +231,7 @@ extern int CIFSSMBUnixQuerySymLink(const int xid,
const unsigned char *searchName,
char *syminfo, const int buflen,
const struct nls_table *nls_codepage);
-extern int CIFSSMBQueryReparseLinkInfo(const int xid,
+extern int CIFSSMBQueryReparseLinkInfo(const int xid,
struct cifsTconInfo *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
@@ -244,35 +247,35 @@ extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
const int access_flags, const int omode,
__u16 * netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
u32 posix_flags, __u64 mode, __u16 * netfid,
FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
- const struct nls_table *nls_codepage, int remap);
+ const struct nls_table *nls_codepage, int remap);
extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
const int smb_file_id);
extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
- const int netfid, unsigned int count,
- const __u64 lseek, unsigned int *nbytes, char **buf,
- int * return_buf_type);
+ const int netfid, unsigned int count,
+ const __u64 lseek, unsigned int *nbytes, char **buf,
+ int *return_buf_type);
extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
const int netfid, const unsigned int count,
const __u64 lseek, unsigned int *nbytes,
- const char *buf, const char __user *ubuf,
+ const char *buf, const char __user *ubuf,
const int long_op);
extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
const int netfid, const unsigned int count,
- const __u64 offset, unsigned int *nbytes,
+ const __u64 offset, unsigned int *nbytes,
struct kvec *iov, const int nvec, const int long_op);
extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName, __u64 * inode_number,
- const struct nls_table *nls_codepage,
+ const struct nls_table *nls_codepage,
int remap_special_chars);
extern int cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
- const struct nls_table * codepage);
-extern int cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
- const struct nls_table * cp, int mapChars);
+ const struct nls_table *codepage);
+extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+ const struct nls_table *cp, int mapChars);
extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 netfid, const __u64 len,
@@ -281,7 +284,7 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const int waitFlag);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
- const __u64 len, struct file_lock *,
+ const __u64 len, struct file_lock *,
const __u16 lock_type, const int waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
@@ -291,54 +294,56 @@ extern void sesInfoFree(struct cifsSesInfo *);
extern struct cifsTconInfo *tconInfoAlloc(void);
extern void tconInfoFree(struct cifsTconInfo *);
-extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *);
+extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
__u32 *);
-extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
- __u32 expected_sequence_number);
-extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
+extern int cifs_verify_signature(struct smb_hdr *,
+ const struct mac_key *mac_key,
+ __u32 expected_sequence_number);
+extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+ const char *pass);
+extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
const struct nls_table *);
extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * );
-extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
const struct nls_table *);
#ifdef CONFIG_CIFS_WEAK_PW_HASH
-extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key);
+extern void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key);
#endif /* CIFS_WEAK_PW_HASH */
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
const __u16 target_tid,
const char *toName, const int flags,
- const struct nls_table *nls_codepage,
+ const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
- const int notify_subdirs,const __u16 netfid,
- __u32 filter, struct file * file, int multishot,
+extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+ const int notify_subdirs, const __u16 netfid,
+ __u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char * EAData,
+ const unsigned char *searchName, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
+extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName, const unsigned char *ea_name,
+ unsigned char *ea_value, size_t buf_size,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const char * ea_name,
- const void * ea_value, const __u16 ea_value_len,
+extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+ const char *fileName, const char *ea_name,
+ const void *ea_value, const __u16 ea_value_len,
const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
__u16 fid, char *acl_inf, const int buflen,
const int acl_type /* ACCESS vs. DEFAULT */);
extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char *acl_inf, const int buflen,const int acl_type,
+ char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
- const int netfid, __u64 * pExtAttrBits, __u64 *pMask);
+ const int netfid, __u64 * pExtAttrBits, __u64 *pMask);
#endif /* _CIFSPROTO_H */
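Reviewer note: the prototype churn above includes one substantive change — cifs_verify_signature() and cifs_calculate_mac_key() now take a struct mac_key * rather than a bare char *, so the key length travels with the key bytes. A minimal sketch of the idea, assuming an illustrative layout (the real definition lives in cifsglob.h and may differ):

/* Sketch only -- illustrative layout, not the cifsglob.h definition. */
struct example_mac_key {
	unsigned int len;	/* valid bytes in data[] */
	char data[64];		/* session key material; size assumed here */
};

/* Callers can now hand one object to the signing helpers instead of a
 * raw pointer whose length is implied by convention. */
static void example_fill_key(struct example_mac_key *key,
			     const char *src, unsigned int src_len)
{
	if (src_len > sizeof(key->data))
		src_len = sizeof(key->data);
	memcpy(key->data, src, src_len);
	key->len = src_len;
}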
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 57419a1..8eb102f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -48,7 +48,7 @@ static struct {
{LANMAN_PROT, "\2LM1.2X002"},
{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
- {CIFS_PROT, "\2NT LM 0.12"},
+ {CIFS_PROT, "\2NT LM 0.12"},
{POSIX_PROT, "\2POSIX 2"},
{BAD_PROT, "\2"}
};
@@ -61,7 +61,7 @@ static struct {
{LANMAN_PROT, "\2LM1.2X002"},
{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
- {CIFS_PROT, "\2NT LM 0.12"},
+ {CIFS_PROT, "\2NT LM 0.12"},
{BAD_PROT, "\2"}
};
#endif
@@ -84,17 +84,17 @@ static struct {
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
-static void mark_open_files_invalid(struct cifsTconInfo * pTcon)
+static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
{
struct cifsFileInfo *open_file = NULL;
- struct list_head * tmp;
- struct list_head * tmp1;
+ struct list_head *tmp;
+ struct list_head *tmp1;
/* list all files open on tree connection and mark them invalid */
write_lock(&GlobalSMBSeslock);
list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
- open_file = list_entry(tmp,struct cifsFileInfo, tlist);
- if(open_file) {
+ open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ if (open_file) {
open_file->invalidHandle = TRUE;
}
}
@@ -113,75 +113,78 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so
check for tcp and smb session status done differently
for those three - in the calling routine */
- if(tcon) {
- if(tcon->tidStatus == CifsExiting) {
+ if (tcon) {
+ if (tcon->tidStatus == CifsExiting) {
/* only tree disconnect, open, and write,
(and ulogoff which does not have tcon)
are allowed as we start force umount */
- if((smb_command != SMB_COM_WRITE_ANDX) &&
- (smb_command != SMB_COM_OPEN_ANDX) &&
+ if ((smb_command != SMB_COM_WRITE_ANDX) &&
+ (smb_command != SMB_COM_OPEN_ANDX) &&
(smb_command != SMB_COM_TREE_DISCONNECT)) {
- cFYI(1,("can not send cmd %d while umounting",
+ cFYI(1, ("can not send cmd %d while umounting",
smb_command));
return -ENODEV;
}
}
- if((tcon->ses) && (tcon->ses->status != CifsExiting) &&
- (tcon->ses->server)){
+ if ((tcon->ses) && (tcon->ses->status != CifsExiting) &&
+ (tcon->ses->server)) {
struct nls_table *nls_codepage;
- /* Give Demultiplex thread up to 10 seconds to
+ /* Give Demultiplex thread up to 10 seconds to
reconnect, should be greater than cifs socket
timeout which is 7 seconds */
- while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+ while (tcon->ses->server->tcpStatus ==
+ CifsNeedReconnect) {
wait_event_interruptible_timeout(tcon->ses->server->response_q,
- (tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
- if(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+ (tcon->ses->server->tcpStatus ==
+ CifsGood), 10 * HZ);
+ if (tcon->ses->server->tcpStatus ==
+ CifsNeedReconnect) {
/* on "soft" mounts we wait once */
- if((tcon->retry == FALSE) ||
+ if ((tcon->retry == FALSE) ||
(tcon->ses->status == CifsExiting)) {
- cFYI(1,("gave up waiting on reconnect in smb_init"));
+ cFYI(1, ("gave up waiting on "
+ "reconnect in smb_init"));
return -EHOSTDOWN;
} /* else "hard" mount - keep retrying
until process is killed or server
comes back on-line */
} else /* TCP session is reestablished now */
break;
-
}
-
+
nls_codepage = load_nls_default();
/* need to prevent multiple threads trying to
simultaneously reconnect the same SMB session */
down(&tcon->ses->sesSem);
- if(tcon->ses->status == CifsNeedReconnect)
- rc = cifs_setup_session(0, tcon->ses,
+ if (tcon->ses->status == CifsNeedReconnect)
+ rc = cifs_setup_session(0, tcon->ses,
nls_codepage);
- if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+ if (!rc && (tcon->tidStatus == CifsNeedReconnect)) {
mark_open_files_invalid(tcon);
- rc = CIFSTCon(0, tcon->ses, tcon->treeName,
+ rc = CIFSTCon(0, tcon->ses, tcon->treeName,
tcon, nls_codepage);
up(&tcon->ses->sesSem);
/* tell server which Unix caps we support */
if (tcon->ses->capabilities & CAP_UNIX)
reset_cifs_unix_caps(0 /* no xid */,
- tcon,
+ tcon,
NULL /* we do not know sb */,
- NULL /* no vol info */);
+ NULL /* no vol info */);
/* BB FIXME add code to check if wsize needs
update due to negotiated smb buffer size
shrinking */
- if(rc == 0)
+ if (rc == 0)
atomic_inc(&tconInfoReconnectCount);
cFYI(1, ("reconnect tcon rc = %d", rc));
- /* Removed call to reopen open files here -
- it is safer (and faster) to reopen files
+ /* Removed call to reopen open files here.
+ It is safer (and faster) to reopen files
one at a time as needed in read and write */
- /* Check if handle based operation so we
+ /* Check if handle based operation so we
know whether we can continue or not without
returning to caller to reset file handle */
- switch(smb_command) {
+ switch (smb_command) {
case SMB_COM_READ_ANDX:
case SMB_COM_WRITE_ANDX:
case SMB_COM_CLOSE:
@@ -200,7 +203,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
return -EIO;
}
}
- if(rc)
+ if (rc)
return rc;
*request_buf = cifs_small_buf_get();
@@ -209,23 +212,24 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
return -ENOMEM;
}
- header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,wct);
+ header_assemble((struct smb_hdr *) *request_buf, smb_command,
+ tcon, wct);
- if(tcon != NULL)
- cifs_stats_inc(&tcon->num_smbs_sent);
+ if (tcon != NULL)
+ cifs_stats_inc(&tcon->num_smbs_sent);
return rc;
}
int
-small_smb_init_no_tc(const int smb_command, const int wct,
+small_smb_init_no_tc(const int smb_command, const int wct,
struct cifsSesInfo *ses, void **request_buf)
{
int rc;
- struct smb_hdr * buffer;
+ struct smb_hdr *buffer;
rc = small_smb_init(smb_command, wct, NULL, request_buf);
- if(rc)
+ if (rc)
return rc;
buffer = (struct smb_hdr *)*request_buf;
@@ -237,7 +241,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
/* uid, tid can stay at zero as set in header assemble */
- /* BB add support for turning on the signing when
+ /* BB add support for turning on the signing when
this function is used after 1st of session setup requests */
return rc;
@@ -254,52 +258,53 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so
check for tcp and smb session status done differently
for those three - in the calling routine */
- if(tcon) {
- if(tcon->tidStatus == CifsExiting) {
+ if (tcon) {
+ if (tcon->tidStatus == CifsExiting) {
/* only tree disconnect, open, and write,
(and ulogoff which does not have tcon)
are allowed as we start force umount */
- if((smb_command != SMB_COM_WRITE_ANDX) &&
+ if ((smb_command != SMB_COM_WRITE_ANDX) &&
(smb_command != SMB_COM_OPEN_ANDX) &&
(smb_command != SMB_COM_TREE_DISCONNECT)) {
- cFYI(1,("can not send cmd %d while umounting",
+ cFYI(1, ("can not send cmd %d while umounting",
smb_command));
return -ENODEV;
}
}
- if((tcon->ses) && (tcon->ses->status != CifsExiting) &&
- (tcon->ses->server)){
+ if ((tcon->ses) && (tcon->ses->status != CifsExiting) &&
+ (tcon->ses->server)) {
struct nls_table *nls_codepage;
/* Give Demultiplex thread up to 10 seconds to
reconnect, should be greater than cifs socket
timeout which is 7 seconds */
- while(tcon->ses->server->tcpStatus == CifsNeedReconnect) {
+ while (tcon->ses->server->tcpStatus ==
+ CifsNeedReconnect) {
wait_event_interruptible_timeout(tcon->ses->server->response_q,
- (tcon->ses->server->tcpStatus == CifsGood), 10 * HZ);
- if(tcon->ses->server->tcpStatus ==
+ (tcon->ses->server->tcpStatus ==
+ CifsGood), 10 * HZ);
+ if (tcon->ses->server->tcpStatus ==
CifsNeedReconnect) {
/* on "soft" mounts we wait once */
- if((tcon->retry == FALSE) ||
+ if ((tcon->retry == FALSE) ||
(tcon->ses->status == CifsExiting)) {
- cFYI(1,("gave up waiting on reconnect in smb_init"));
+ cFYI(1, ("gave up waiting on "
+ "reconnect in smb_init"));
return -EHOSTDOWN;
} /* else "hard" mount - keep retrying
until process is killed or server
comes on-line */
} else /* TCP session is reestablished now */
break;
-
}
-
nls_codepage = load_nls_default();
/* need to prevent multiple threads trying to
simultaneously reconnect the same SMB session */
down(&tcon->ses->sesSem);
- if(tcon->ses->status == CifsNeedReconnect)
- rc = cifs_setup_session(0, tcon->ses,
+ if (tcon->ses->status == CifsNeedReconnect)
+ rc = cifs_setup_session(0, tcon->ses,
nls_codepage);
- if(!rc && (tcon->tidStatus == CifsNeedReconnect)) {
+ if (!rc && (tcon->tidStatus == CifsNeedReconnect)) {
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, tcon->ses, tcon->treeName,
tcon, nls_codepage);
@@ -307,24 +312,24 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* tell server which Unix caps we support */
if (tcon->ses->capabilities & CAP_UNIX)
reset_cifs_unix_caps(0 /* no xid */,
- tcon,
+ tcon,
NULL /* do not know sb */,
NULL /* no vol info */);
/* BB FIXME add code to check if wsize needs
update due to negotiated smb buffer size
shrinking */
- if(rc == 0)
+ if (rc == 0)
atomic_inc(&tconInfoReconnectCount);
cFYI(1, ("reconnect tcon rc = %d", rc));
- /* Removed call to reopen open files here -
- it is safer (and faster) to reopen files
+ /* Removed call to reopen open files here.
+ It is safer (and faster) to reopen files
one at a time as needed in read and write */
- /* Check if handle based operation so we
+ /* Check if handle based operation so we
know whether we can continue or not without
returning to caller to reset file handle */
- switch(smb_command) {
+ switch (smb_command) {
case SMB_COM_READ_ANDX:
case SMB_COM_WRITE_ANDX:
case SMB_COM_CLOSE:
@@ -343,7 +348,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
return -EIO;
}
}
- if(rc)
+ if (rc)
return rc;
*request_buf = cifs_buf_get();
@@ -355,48 +360,48 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* potential retries of smb operations it turns out we can determine */
/* from the mid flags when the request buffer can be resent without */
/* having to use a second distinct buffer for the response */
- if(response_buf)
- *response_buf = *request_buf;
+ if (response_buf)
+ *response_buf = *request_buf;
header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
wct /*wct */ );
- if(tcon != NULL)
- cifs_stats_inc(&tcon->num_smbs_sent);
+ if (tcon != NULL)
+ cifs_stats_inc(&tcon->num_smbs_sent);
return rc;
}
-static int validate_t2(struct smb_t2_rsp * pSMB)
+static int validate_t2(struct smb_t2_rsp *pSMB)
{
int rc = -EINVAL;
int total_size;
- char * pBCC;
+ char *pBCC;
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
- if(pSMB->hdr.WordCount >= 10) {
- if((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
+ if (pSMB->hdr.WordCount >= 10) {
+ if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
(le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
/* check that bcc is at least as big as parms + data */
/* check that bcc is less than negotiated smb buffer */
total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
- if(total_size < 512) {
- total_size+=le16_to_cpu(pSMB->t2_rsp.DataCount);
+ if (total_size < 512) {
+ total_size +=
+ le16_to_cpu(pSMB->t2_rsp.DataCount);
/* BCC le converted in SendReceive */
- pBCC = (pSMB->hdr.WordCount * 2) +
+ pBCC = (pSMB->hdr.WordCount * 2) +
sizeof(struct smb_hdr) +
(char *)pSMB;
- if((total_size <= (*(u16 *)pBCC)) &&
- (total_size <
+ if ((total_size <= (*(u16 *)pBCC)) &&
+ (total_size <
CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
return 0;
}
-
}
}
}
- cifs_dump_mem("Invalid transact2 SMB: ",(char *)pSMB,
+ cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
sizeof(struct smb_t2_rsp) + 16);
return rc;
}
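Reviewer note: validate_t2() above is a plausibility check run before any Trans2 response fields are dereferenced — word count, parameter/data offsets and the byte count all have to stay inside the negotiated buffer. The core of that check, reduced to a standalone helper (names and the overflow-safe form are illustrative, not cifs code):

/* Illustrative bounds check: an (offset, count) pair from the wire is
 * only usable if it stays inside a buffer of buf_len bytes.  Comparing
 * count against buf_len - offset avoids overflow in offset + count. */
static int example_range_ok(unsigned int buf_len,
			    unsigned int offset, unsigned int count)
{
	if (offset > buf_len)
		return 0;
	if (count > buf_len - offset)
		return 0;
	return 1;
}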
@@ -408,12 +413,12 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
int rc = 0;
int bytes_returned;
int i;
- struct TCP_Server_Info * server;
+ struct TCP_Server_Info *server;
u16 count;
unsigned int secFlags;
u16 dialect;
- if(ses->server)
+ if (ses->server)
server = ses->server;
else {
rc = -EIO;
@@ -425,20 +430,20 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
return rc;
/* if any of auth flags (ie not sign or seal) are overriden use them */
- if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
- secFlags = ses->overrideSecFlg;
+ if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
else /* if override flags set only sign/seal OR them with global auth */
secFlags = extended_security | ses->overrideSecFlg;
- cFYI(1,("secFlags 0x%x",secFlags));
+ cFYI(1, ("secFlags 0x%x", secFlags));
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
-
+
count = 0;
- for(i=0;i<CIFS_NUM_PROT;i++) {
+ for (i = 0; i < CIFS_NUM_PROT; i++) {
strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
count += strlen(protocols[i].name) + 1;
/* null at end of source and target buffers anyway */
@@ -448,26 +453,26 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc != 0)
+ if (rc != 0)
goto neg_err_exit;
dialect = le16_to_cpu(pSMBr->DialectIndex);
- cFYI(1,("Dialect: %d", dialect));
+ cFYI(1, ("Dialect: %d", dialect));
/* Check wct = 1 error case */
- if((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
+ if ((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
/* core returns wct = 1, but we do not ask for core - otherwise
- small wct just comes when dialect index is -1 indicating we
+ small wct just comes when dialect index is -1 indicating we
could not negotiate a common dialect */
rc = -EOPNOTSUPP;
goto neg_err_exit;
-#ifdef CONFIG_CIFS_WEAK_PW_HASH
- } else if((pSMBr->hdr.WordCount == 13)
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ } else if ((pSMBr->hdr.WordCount == 13)
&& ((dialect == LANMAN_PROT)
|| (dialect == LANMAN2_PROT))) {
__s16 tmp;
- struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
+ struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
- if((secFlags & CIFSSEC_MAY_LANMAN) ||
+ if ((secFlags & CIFSSEC_MAY_LANMAN) ||
(secFlags & CIFSSEC_MAY_PLNTXT))
server->secType = LANMAN;
else {
@@ -475,7 +480,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
" in /proc/fs/cifs/SecurityFlags"));
rc = -EOPNOTSUPP;
goto neg_err_exit;
- }
+ }
server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
@@ -483,7 +488,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
/* even though we do not use raw we might as well set this
accurately, in case we ever find a need for it */
- if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
+ if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
server->maxRw = 0xFF00;
server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
} else {
@@ -504,29 +509,29 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
utc = CURRENT_TIME;
ts = cnvrtDosUnixTm(le16_to_cpu(rsp->SrvTime.Date),
le16_to_cpu(rsp->SrvTime.Time));
- cFYI(1,("SrvTime: %d sec since 1970 (utc: %d) diff: %d",
- (int)ts.tv_sec, (int)utc.tv_sec,
+ cFYI(1, ("SrvTime %d sec since 1970 (utc: %d) diff: %d",
+ (int)ts.tv_sec, (int)utc.tv_sec,
(int)(utc.tv_sec - ts.tv_sec)));
val = (int)(utc.tv_sec - ts.tv_sec);
seconds = val < 0 ? -val : val;
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
remain = seconds % MIN_TZ_ADJ;
- if(remain >= (MIN_TZ_ADJ / 2))
+ if (remain >= (MIN_TZ_ADJ / 2))
result += MIN_TZ_ADJ;
- if(val < 0)
+ if (val < 0)
result = - result;
server->timeAdj = result;
} else {
server->timeAdj = (int)tmp;
server->timeAdj *= 60; /* also in seconds */
}
- cFYI(1,("server->timeAdj: %d seconds", server->timeAdj));
+ cFYI(1, ("server->timeAdj: %d seconds", server->timeAdj));
/* BB get server time for time conversions and add
- code to use it and timezone since this is not UTC */
+ code to use it and timezone since this is not UTC */
- if (rsp->EncryptionKeyLength ==
+ if (rsp->EncryptionKeyLength ==
cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
memcpy(server->cryptKey, rsp->EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
@@ -535,39 +540,39 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
goto neg_err_exit;
}
- cFYI(1,("LANMAN negotiated"));
+ cFYI(1, ("LANMAN negotiated"));
/* we will not end up setting signing flags - as no signing
was in LANMAN and server did not return the flags on */
goto signing_check;
#else /* weak security disabled */
- } else if(pSMBr->hdr.WordCount == 13) {
- cERROR(1,("mount failed, cifs module not built "
+ } else if (pSMBr->hdr.WordCount == 13) {
+ cERROR(1, ("mount failed, cifs module not built "
"with CIFS_WEAK_PW_HASH support"));
rc = -EOPNOTSUPP;
#endif /* WEAK_PW_HASH */
goto neg_err_exit;
- } else if(pSMBr->hdr.WordCount != 17) {
+ } else if (pSMBr->hdr.WordCount != 17) {
/* unknown wct */
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
/* else wct == 17 NTLM */
server->secMode = pSMBr->SecurityMode;
- if((server->secMode & SECMODE_USER) == 0)
- cFYI(1,("share mode security"));
+ if ((server->secMode & SECMODE_USER) == 0)
+ cFYI(1, ("share mode security"));
- if((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
- cERROR(1,("Server requests plain text password"
+ cERROR(1, ("Server requests plain text password"
" but client support disabled"));
- if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
+ if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
server->secType = NTLMv2;
- else if(secFlags & CIFSSEC_MAY_NTLM)
+ else if (secFlags & CIFSSEC_MAY_NTLM)
server->secType = NTLM;
- else if(secFlags & CIFSSEC_MAY_NTLMV2)
+ else if (secFlags & CIFSSEC_MAY_NTLMV2)
server->secType = NTLMv2;
/* else krb5 ... any others ... */
@@ -596,7 +601,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
/* BB might be helpful to save off the domain of server here */
- if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
+ if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
(server->capabilities & CAP_EXTENDED_SECURITY)) {
count = pSMBr->ByteCount;
if (count < 16)
@@ -620,7 +625,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
SecurityBlob,
count - 16,
&server->secType);
- if(rc == 1) {
+ if (rc == 1) {
/* BB Need to fill struct for sessetup here */
rc = -EOPNOTSUPP;
} else {
@@ -633,26 +638,37 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
signing_check:
#endif
- if(sign_CIFS_PDUs == FALSE) {
- if(server->secMode & SECMODE_SIGN_REQUIRED)
- cERROR(1,("Server requires "
- "/proc/fs/cifs/PacketSigningEnabled to be on"));
- server->secMode &=
+ if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
+ /* MUST_SIGN already includes the MAY_SIGN FLAG
+ so if this is zero it means that signing is disabled */
+ cFYI(1, ("Signing disabled"));
+ if (server->secMode & SECMODE_SIGN_REQUIRED)
+ cERROR(1, ("Server requires "
+ "/proc/fs/cifs/PacketSigningEnabled "
+ "to be on"));
+ server->secMode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
- } else if(sign_CIFS_PDUs == 1) {
- if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
- server->secMode &=
- ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
- } else if(sign_CIFS_PDUs == 2) {
- if((server->secMode &
+ } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
+ /* signing required */
+ cFYI(1, ("Must sign - secFlags 0x%x", secFlags));
+ if ((server->secMode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
- cERROR(1,("signing required but server lacks support"));
- }
+ cERROR(1,
+ ("signing required but server lacks support"));
+ rc = -EOPNOTSUPP;
+ } else
+ server->secMode |= SECMODE_SIGN_REQUIRED;
+ } else {
+ /* signing optional ie CIFSSEC_MAY_SIGN */
+ if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
-neg_err_exit:
+
+neg_err_exit:
cifs_buf_release(pSMB);
- cFYI(1,("negprot rc %d",rc));
+ cFYI(1, ("negprot rc %d", rc));
return rc;
}
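Reviewer note: the largest behavioural change in this hunk is the signing decision at the end of CIFSSMBNegotiate(): it now keys off secFlags (CIFSSEC_MAY_SIGN / CIFSSEC_MUST_SIGN) instead of the old sign_CIFS_PDUs global, and a MUST_SIGN client now fails negotiation with -EOPNOTSUPP when the server cannot sign. A condensed sketch of that three-way decision (the helper and flag values are stand-ins, not the cifs definitions):

/* Hypothetical condensation of the signing logic above.  MUST includes
 * MAY, as the in-tree comment notes, so a clear MAY bit means "never". */
#define EX_MAY_SIGN	0x1
#define EX_MUST_SIGN	0x3

/* Returns 1 to sign, 0 not to sign, -1 for an unsatisfiable combination. */
static int example_choose_signing(unsigned int sec_flags,
				  int server_requires, int server_supports)
{
	if ((sec_flags & EX_MAY_SIGN) == 0) {
		/* signing disabled locally; the real code only warns when
		   the server requires it and does not fail the mount here */
		return 0;
	}
	if ((sec_flags & EX_MUST_SIGN) == EX_MUST_SIGN)
		return server_supports ? 1 : -1;	/* -EOPNOTSUPP in cifs */
	/* MAY_SIGN only: follow the server's requirement */
	return server_requires ? 1 : 0;
}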
@@ -669,7 +685,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
* If last user of the connection and
* connection alive - disconnect it
* If this is the last connection on the server session disconnect it
- * (and inside session disconnect we should check if tcp socket needs
+ * (and inside session disconnect we should check if tcp socket needs
* to be freed and kernel thread woken up).
*/
if (tcon)
@@ -683,18 +699,18 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
return -EBUSY;
}
- /* No need to return error on this operation if tid invalidated and
+ /* No need to return error on this operation if tid invalidated and
closed on server already e.g. due to tcp session crashing */
- if(tcon->tidStatus == CifsNeedReconnect) {
+ if (tcon->tidStatus == CifsNeedReconnect) {
up(&tcon->tconSem);
- return 0;
+ return 0;
}
- if((tcon->ses == NULL) || (tcon->ses->server == NULL)) {
+ if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) {
up(&tcon->tconSem);
return -EIO;
}
- rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
+ rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
(void **)&smb_buffer);
if (rc) {
up(&tcon->tconSem);
@@ -711,7 +727,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
cifs_small_buf_release(smb_buffer);
up(&tcon->tconSem);
- /* No need to return error on this operation if tid invalidated and
+ /* No need to return error on this operation if tid invalidated and
closed on server already e.g. due to tcp session crashing */
if (rc == -EAGAIN)
rc = 0;
@@ -745,11 +761,11 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
}
smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */
-
- if(ses->server) {
+
+ if (ses->server) {
pSMB->hdr.Mid = GetNextMid(ses->server);
- if(ses->server->secMode &
+ if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -772,7 +788,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
cifs_small_buf_release(pSMB);
/* if session dead then we do not need to do ulogoff,
- since server closed smb session, no sense reporting
+ since server closed smb session, no sense reporting
error */
if (rc == -EAGAIN)
rc = 0;
@@ -780,6 +796,82 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
}
int
+CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+ __u16 type, const struct nls_table *nls_codepage, int remap)
+{
+ TRANSACTION2_SPI_REQ *pSMB = NULL;
+ TRANSACTION2_SPI_RSP *pSMBr = NULL;
+ struct unlink_psx_rq *pRqD;
+ int name_len;
+ int rc = 0;
+ int bytes_returned = 0;
+ __u16 params, param_offset, offset, byte_count;
+
+ cFYI(1, ("In POSIX delete"));
+PsxDelete:
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+ name_len =
+ cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
+ PATH_MAX, nls_codepage, remap);
+ name_len++; /* trailing null */
+ name_len *= 2;
+ } else { /* BB add path length overrun check */
+ name_len = strnlen(fileName, PATH_MAX);
+ name_len++; /* trailing null */
+ strncpy(pSMB->FileName, fileName, name_len);
+ }
+
+ params = 6 + name_len;
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ pSMB->MaxDataCount = 0; /* BB double check this with jra */
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ param_offset = offsetof(struct smb_com_transaction2_spi_req,
+ InformationLevel) - 4;
+ offset = param_offset + params;
+
+ /* Setup pointer to Request Data (inode type) */
+ pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+ pRqD->type = cpu_to_le16(type);
+ pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ pSMB->DataOffset = cpu_to_le16(offset);
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+ byte_count = 3 /* pad */ + params + sizeof(struct unlink_psx_rq);
+
+ pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+ pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+ pSMB->ParameterCount = cpu_to_le16(params);
+ pSMB->TotalParameterCount = pSMB->ParameterCount;
+ pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->ByteCount = cpu_to_le16(byte_count);
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1, ("Posix delete returned %d", rc));
+ }
+ cifs_buf_release(pSMB);
+
+ cifs_stats_inc(&tcon->num_deletes);
+
+ if (rc == -EAGAIN)
+ goto PsxDelete;
+
+ return rc;
+}
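Reviewer note: CIFSPOSIXDelFile() above is new in this patch; it sends a TRANS2_SET_PATH_INFORMATION request at the SMB_POSIX_UNLINK level so a POSIX-capable server can perform the unlink itself. A hedged sketch of how a caller might wire it up (the call site, the type value and the fallback policy are illustrative, not taken from this patch):

/* Illustrative caller only; 0 stands in for the real "unlink a file"
 * type code, and the fallback policy is an assumption for the sketch. */
static int example_posix_unlink(const int xid, struct cifsTconInfo *tcon,
				const char *full_path,
				const struct nls_table *nls, int remap)
{
	int rc = CIFSPOSIXDelFile(xid, tcon, full_path, 0 /* type */,
				  nls, remap);

	if (rc == -EOPNOTSUPP)
		/* server lacks the POSIX extension: fall back to the
		   ordinary SMB delete shown further down in this file */
		rc = CIFSSMBDelFile(xid, tcon, full_path, nls, remap);
	return rc;
}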
+
+int
CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
const struct nls_table *nls_codepage, int remap)
{
@@ -797,7 +889,7 @@ DelFileRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
+ cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -816,7 +908,7 @@ DelFileRetry:
cifs_stats_inc(&tcon->num_deletes);
if (rc) {
cFYI(1, ("Error in RMFile = %d", rc));
- }
+ }
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -826,7 +918,7 @@ DelFileRetry:
}
int
-CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
+CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -887,7 +979,7 @@ MkDirRetry:
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
+ name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -916,7 +1008,7 @@ MkDirRetry:
int
CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
__u64 mode, __u16 * netfid, FILE_UNIX_BASIC_INFO *pRetData,
- __u32 *pOplock, const char *name,
+ __u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -924,7 +1016,6 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
int name_len;
int rc = 0;
int bytes_returned = 0;
- char *data_offset;
__u16 params, param_offset, offset, byte_count, count;
OPEN_PSX_REQ * pdata;
OPEN_PSX_RSP * psx_rsp;
@@ -958,13 +1049,12 @@ PsxCreat:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
pdata->Level = SMB_QUERY_FILE_UNIX_BASIC;
pdata->Permissions = cpu_to_le64(mode);
- pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
+ pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
pdata->OpenFlags = cpu_to_le32(*pOplock);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
@@ -979,7 +1069,7 @@ PsxCreat:
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
@@ -988,7 +1078,7 @@ PsxCreat:
goto psx_create_err;
}
- cFYI(1,("copying inode info"));
+ cFYI(1, ("copying inode info"));
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) {
@@ -997,34 +1087,33 @@ PsxCreat:
}
/* copy return information to pRetData */
- psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
+ psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
+ le16_to_cpu(pSMBr->t2.DataOffset));
-
+
*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
- if(netfid)
+ if (netfid)
*netfid = psx_rsp->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
- if(cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
+ if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
*pOplock |= CIFS_CREATE_ACTION;
/* check to make sure response data is there */
- if(psx_rsp->ReturnedLevel != SMB_QUERY_FILE_UNIX_BASIC) {
+ if (psx_rsp->ReturnedLevel != SMB_QUERY_FILE_UNIX_BASIC) {
pRetData->Type = -1; /* unknown */
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1,("unknown type"));
+ cFYI(1, ("unknown type"));
#endif
} else {
- if(pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
+ if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
+ sizeof(FILE_UNIX_BASIC_INFO)) {
- cERROR(1,("Open response data too small"));
+ cERROR(1, ("Open response data too small"));
pRetData->Type = -1;
goto psx_create_err;
}
- memcpy((char *) pRetData,
+ memcpy((char *) pRetData,
(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
sizeof (FILE_UNIX_BASIC_INFO));
}
-
psx_create_err:
cifs_buf_release(pSMB);
@@ -1034,7 +1123,7 @@ psx_create_err:
if (rc == -EAGAIN)
goto PsxCreat;
- return rc;
+ return rc;
}
static __u16 convert_disposition(int disposition)
@@ -1061,7 +1150,7 @@ static __u16 convert_disposition(int disposition)
ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
break;
default:
- cFYI(1,("unknown disposition %d",disposition));
+ cFYI(1, ("unknown disposition %d", disposition));
ofun = SMBOPEN_OAPPEND; /* regular open */
}
return ofun;
@@ -1071,7 +1160,7 @@ int
SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 * netfid,
- int *pOplock, FILE_ALL_INFO * pfile_info,
+ int *pOplock, FILE_ALL_INFO * pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
@@ -1113,16 +1202,16 @@ OldOpenRetry:
1 = write
2 = rw
3 = execute
- */
+ */
pSMB->Mode = cpu_to_le16(2);
pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
/* set file as system file if special file such
as fifo and server expecting SFU style and
no Unix extensions */
- if(create_options & CREATE_OPTION_SPECIAL)
- pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
- else
+ if (create_options & CREATE_OPTION_SPECIAL)
+ pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
+ else
pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); /* BB FIXME */
/* if ((omode & S_IWUGO) == 0)
@@ -1132,7 +1221,8 @@ OldOpenRetry:
being created */
/* BB FIXME BB */
-/* pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK); */
+/* pSMB->CreateOptions = cpu_to_le32(create_options &
+ CREATE_OPTIONS_MASK); */
/* BB FIXME END BB */
pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
@@ -1143,7 +1233,7 @@ OldOpenRetry:
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 1);
+ (struct smb_hdr *) pSMBr, &bytes_returned, 1);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, ("Error in Open = %d", rc));
@@ -1156,17 +1246,17 @@ OldOpenRetry:
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
/* BB FIXME BB */
-/* if(cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+/* if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
*pOplock |= CIFS_CREATE_ACTION; */
/* BB FIXME END */
- if(pfile_info) {
+ if (pfile_info) {
pfile_info->CreationTime = 0; /* BB convert CreateTime*/
pfile_info->LastAccessTime = 0; /* BB fixme */
pfile_info->LastWriteTime = 0; /* BB fixme */
pfile_info->ChangeTime = 0; /* BB fixme */
pfile_info->Attributes =
- cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
+ cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
/* the file_info buf is endian converted by caller */
pfile_info->AllocationSize =
cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
@@ -1185,7 +1275,7 @@ int
CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 * netfid,
- int *pOplock, FILE_ALL_INFO * pfile_info,
+ int *pOplock, FILE_ALL_INFO * pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
@@ -1228,7 +1318,7 @@ openRetry:
/* set file as system file if special file such
as fifo and server expecting SFU style and
no Unix extensions */
- if(create_options & CREATE_OPTION_SPECIAL)
+ if (create_options & CREATE_OPTION_SPECIAL)
pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
else
pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);
@@ -1266,10 +1356,10 @@ openRetry:
*netfid = pSMBr->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
- if(cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
- *pOplock |= CIFS_CREATE_ACTION;
- if(pfile_info) {
- memcpy((char *)pfile_info,(char *)&pSMBr->CreationTime,
+ if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+ *pOplock |= CIFS_CREATE_ACTION;
+ if (pfile_info) {
+ memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime,
36 /* CreationTime to Attributes */);
/* the file_info buf is endian converted by caller */
pfile_info->AllocationSize = pSMBr->AllocationSize;
@@ -1285,10 +1375,9 @@ openRetry:
}
int
-CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
- const int netfid, const unsigned int count,
- const __u64 lseek, unsigned int *nbytes, char **buf,
- int * pbuf_type)
+CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
+ const unsigned int count, const __u64 lseek, unsigned int *nbytes,
+ char **buf, int *pbuf_type)
{
int rc = -EACCES;
READ_REQ *pSMB = NULL;
@@ -1298,8 +1387,8 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
int resp_buf_type = 0;
struct kvec iov[1];
- cFYI(1,("Reading %d bytes on fid %d",count,netfid));
- if(tcon->ses->capabilities & CAP_LARGE_FILES)
+ cFYI(1, ("Reading %d bytes on fid %d", count, netfid));
+ if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else
wct = 10; /* old style read */
@@ -1316,28 +1405,28 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
- if(wct == 12)
+ if (wct == 12)
pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
- else if((lseek >> 32) > 0) /* can not handle this big offset for old */
+ else if ((lseek >> 32) > 0) /* can not handle this big offset for old */
return -EIO;
pSMB->Remaining = 0;
pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
- if(wct == 12)
+ if (wct == 12)
pSMB->ByteCount = 0; /* no need to do le conversion since 0 */
else {
/* old style read */
- struct smb_com_readx_req * pSMBW =
+ struct smb_com_readx_req *pSMBW =
(struct smb_com_readx_req *)pSMB;
pSMBW->ByteCount = 0;
}
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
- rc = SendReceive2(xid, tcon->ses, iov,
+ rc = SendReceive2(xid, tcon->ses, iov,
1 /* num iovecs */,
- &resp_buf_type, 0);
+ &resp_buf_type, 0);
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
@@ -1351,33 +1440,34 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
/*check that DataLength would not go beyond end of SMB */
if ((data_length > CIFSMaxBufSize)
|| (data_length > count)) {
- cFYI(1,("bad length %d for count %d",data_length,count));
+ cFYI(1, ("bad length %d for count %d",
+ data_length, count));
rc = -EIO;
*nbytes = 0;
} else {
pReadData = (char *) (&pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->DataOffset);
-/* if(rc = copy_to_user(buf, pReadData, data_length)) {
- cERROR(1,("Faulting on read rc = %d",rc));
- rc = -EFAULT;
+/* if (rc = copy_to_user(buf, pReadData, data_length)) {
+ cERROR(1,("Faulting on read rc = %d",rc));
+ rc = -EFAULT;
}*/ /* can not use copy_to_user when using page cache*/
- if(*buf)
- memcpy(*buf,pReadData,data_length);
+ if (*buf)
+ memcpy(*buf, pReadData, data_length);
}
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
- if(*buf) {
- if(resp_buf_type == CIFS_SMALL_BUFFER)
+ if (*buf) {
+ if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
- else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
- } else if(resp_buf_type != CIFS_NO_BUFFER) {
- /* return buffer to caller to free */
- *buf = iov[0].iov_base;
- if(resp_buf_type == CIFS_SMALL_BUFFER)
+ } else if (resp_buf_type != CIFS_NO_BUFFER) {
+ /* return buffer to caller to free */
+ *buf = iov[0].iov_base;
+ if (resp_buf_type == CIFS_SMALL_BUFFER)
*pbuf_type = CIFS_SMALL_BUFFER;
- else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ else if (resp_buf_type == CIFS_LARGE_BUFFER)
*pbuf_type = CIFS_LARGE_BUFFER;
} /* else no valid buffer on return - leave as null */
@@ -1391,7 +1481,7 @@ int
CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes, const char *buf,
- const char __user * ubuf, const int long_op)
+ const char __user *ubuf, const int long_op)
{
int rc = -EACCES;
WRITE_REQ *pSMB = NULL;
@@ -1401,10 +1491,10 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
__u16 byte_count;
/* cFYI(1,("write at %lld %d bytes",offset,count));*/
- if(tcon->ses == NULL)
+ if (tcon->ses == NULL)
return -ECONNABORTED;
- if(tcon->ses->capabilities & CAP_LARGE_FILES)
+ if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 14;
else
wct = 12;
@@ -1420,20 +1510,20 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
- if(wct == 14)
+ if (wct == 14)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
- else if((offset >> 32) > 0) /* can not handle this big offset for old */
+ else if ((offset >> 32) > 0) /* can not handle big offset for old srv */
return -EIO;
-
+
pSMB->Reserved = 0xFFFFFFFF;
pSMB->WriteMode = 0;
pSMB->Remaining = 0;
- /* Can increase buffer size if buffer is big enough in some cases - ie we
+ /* Can increase buffer size if buffer is big enough in some cases ie we
can send more if LARGE_WRITE_X capability returned by the server and if
our buffer is big enough or if we convert to iovecs on socket writes
and eliminate the copy to the CIFS buffer */
- if(tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
+ if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
} else {
bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
@@ -1443,11 +1533,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
if (bytes_sent > count)
bytes_sent = count;
pSMB->DataOffset =
- cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
- if(buf)
- memcpy(pSMB->Data,buf,bytes_sent);
- else if(ubuf) {
- if(copy_from_user(pSMB->Data,ubuf,bytes_sent)) {
+ cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+ if (buf)
+ memcpy(pSMB->Data, buf, bytes_sent);
+ else if (ubuf) {
+ if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
cifs_buf_release(pSMB);
return -EFAULT;
}
@@ -1456,7 +1546,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
return -EINVAL;
} /* else setting file size with write of zero bytes */
- if(wct == 14)
+ if (wct == 14)
byte_count = bytes_sent + 1; /* pad */
else /* wct == 12 */ {
byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
@@ -1465,10 +1555,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
pSMB->hdr.smb_buf_length += byte_count;
- if(wct == 14)
+ if (wct == 14)
pSMB->ByteCount = cpu_to_le16(byte_count);
- else { /* old style write has byte count 4 bytes earlier so 4 bytes pad */
- struct smb_com_writex_req * pSMBW =
+ else { /* old style write has byte count 4 bytes earlier
+ so 4 bytes pad */
+ struct smb_com_writex_req *pSMBW =
(struct smb_com_writex_req *)pSMB;
pSMBW->ByteCount = cpu_to_le16(byte_count);
}
@@ -1487,7 +1578,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
- /* Note: On -EAGAIN error only caller can retry on handle based calls
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
@@ -1505,9 +1596,9 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
int smb_hdr_len;
int resp_buf_type = 0;
- cFYI(1,("write2 at %lld %d bytes", (long long)offset, count));
+ cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count));
- if(tcon->ses->capabilities & CAP_LARGE_FILES)
+ if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 14;
else
wct = 12;
@@ -1521,37 +1612,37 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
- if(wct == 14)
+ if (wct == 14)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
- else if((offset >> 32) > 0) /* can not handle this big offset for old */
+ else if ((offset >> 32) > 0) /* can not handle big offset for old srv */
return -EIO;
pSMB->Reserved = 0xFFFFFFFF;
pSMB->WriteMode = 0;
pSMB->Remaining = 0;
pSMB->DataOffset =
- cpu_to_le16(offsetof(struct smb_com_write_req,Data) - 4);
+ cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
smb_hdr_len = pSMB->hdr.smb_buf_length + 1; /* hdr + 1 byte pad */
- if(wct == 14)
+ if (wct == 14)
pSMB->hdr.smb_buf_length += count+1;
else /* wct == 12 */
- pSMB->hdr.smb_buf_length += count+5; /* smb data starts later */
- if(wct == 14)
+ pSMB->hdr.smb_buf_length += count+5; /* smb data starts later */
+ if (wct == 14)
pSMB->ByteCount = cpu_to_le16(count + 1);
else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
- struct smb_com_writex_req * pSMBW =
+ struct smb_com_writex_req *pSMBW =
(struct smb_com_writex_req *)pSMB;
pSMBW->ByteCount = cpu_to_le16(count + 5);
}
iov[0].iov_base = pSMB;
- if(wct == 14)
+ if (wct == 14)
iov[0].iov_len = smb_hdr_len + 4;
else /* wct == 12 pad bigger by four bytes */
iov[0].iov_len = smb_hdr_len + 8;
-
+
rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
long_op);
@@ -1559,7 +1650,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
if (rc) {
cFYI(1, ("Send error Write2 = %d", rc));
*nbytes = 0;
- } else if(resp_buf_type == 0) {
+ } else if (resp_buf_type == 0) {
/* presumably this can not happen, but best to be safe */
rc = -EIO;
*nbytes = 0;
@@ -1568,15 +1659,15 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
- }
+ }
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
- if(resp_buf_type == CIFS_SMALL_BUFFER)
+ if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
- else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
- /* Note: On -EAGAIN error only caller can retry on handle based calls
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
@@ -1596,7 +1687,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
int timeout = 0;
__u16 count;
- cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock));
+ cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d", waitFlag, numLock));
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
if (rc)
@@ -1604,7 +1695,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */
- if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
+ if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
timeout = -1; /* no response expected */
pSMB->Timeout = 0;
} else if (waitFlag == TRUE) {
@@ -1620,7 +1711,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
- if((numLock != 0) || (numUnlock != 0)) {
+ if ((numLock != 0) || (numUnlock != 0)) {
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
/* BB where to store pid high? */
pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
@@ -1648,7 +1739,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
cifs_small_buf_release(pSMB);
- /* Note: On -EAGAIN error only caller can retry on handle based calls
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
@@ -1656,12 +1747,11 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
int
CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len,
- struct file_lock *pLockData, const __u16 lock_type,
+ struct file_lock *pLockData, const __u16 lock_type,
const int waitFlag)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
- char *data_offset;
struct cifs_posix_lock *parm_data;
int rc = 0;
int timeout = 0;
@@ -1670,7 +1760,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
cFYI(1, ("Posix Lock"));
- if(pLockData == NULL)
+ if (pLockData == NULL)
return EINVAL;
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
@@ -1680,7 +1770,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
- params = 6;
+ params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -1688,14 +1778,12 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
count = sizeof(struct cifs_posix_lock);
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
- if(get_flag)
+ if (get_flag)
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
else
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -1705,11 +1793,11 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
- parm_data = (struct cifs_posix_lock *)
+ parm_data = (struct cifs_posix_lock *)
(((char *) &pSMB->hdr.Protocol) + offset);
parm_data->lock_type = cpu_to_le16(lock_type);
- if(waitFlag) {
+ if (waitFlag) {
timeout = 3; /* blocking operation, no timeout */
parm_data->lock_flags = cpu_to_le16(1);
pSMB->Timeout = cpu_to_le32(-1);
@@ -1746,22 +1834,22 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
rc = -EIO; /* bad smb */
goto plk_err_exit;
}
- if(pLockData == NULL) {
+ if (pLockData == NULL) {
rc = -EINVAL;
goto plk_err_exit;
}
data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
data_count = le16_to_cpu(pSMBr->t2.DataCount);
- if(data_count < sizeof(struct cifs_posix_lock)) {
+ if (data_count < sizeof(struct cifs_posix_lock)) {
rc = -EIO;
goto plk_err_exit;
}
parm_data = (struct cifs_posix_lock *)
((char *)&pSMBr->hdr.Protocol + data_offset);
- if(parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
+ if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
pLockData->fl_type = F_UNLCK;
}
-
+
plk_err_exit:
if (pSMB)
cifs_small_buf_release(pSMB);
@@ -1784,7 +1872,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
/* do not retry on dead session on close */
rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
- if(rc == -EAGAIN)
+ if (rc == -EAGAIN)
return 0;
if (rc)
return rc;
@@ -1798,7 +1886,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_closes);
if (rc) {
- if(rc!=-EINTR) {
+ if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
cERROR(1, ("Send error in Close = %d", rc));
}
@@ -1807,7 +1895,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
cifs_small_buf_release(pSMB);
/* Since session is dead, file will be closed on server already */
- if(rc == -EAGAIN)
+ if (rc == -EAGAIN)
rc = 0;
return rc;
@@ -1839,7 +1927,7 @@ renameRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
+ cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -1851,7 +1939,7 @@ renameRetry:
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
@@ -1872,7 +1960,7 @@ renameRetry:
cifs_stats_inc(&tcon->num_renames);
if (rc) {
cFYI(1, ("Send error in rename = %d", rc));
- }
+ }
cifs_buf_release(pSMB);
@@ -1882,13 +1970,13 @@ renameRetry:
return rc;
}
-int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
- int netfid, char * target_name,
- const struct nls_table * nls_codepage, int remap)
+int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+ int netfid, char *target_name,
+ const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
- struct set_file_rename * rename_info;
+ struct set_file_rename *rename_info;
char *data_offset;
char dummy_string[30];
int rc = 0;
@@ -1927,13 +2015,14 @@ int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
rename_info->overwrite = cpu_to_le32(1);
rename_info->root_fid = 0;
/* unicode only call */
- if(target_name == NULL) {
- sprintf(dummy_string,"cifs%x",pSMB->hdr.Mid);
- len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+ if (target_name == NULL) {
+ sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
+ len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
dummy_string, 24, nls_codepage, remap);
} else {
len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
- target_name, PATH_MAX, nls_codepage, remap);
+ target_name, PATH_MAX, nls_codepage,
+ remap);
}
rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str) + 2;
@@ -1947,10 +2036,10 @@ int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&pTcon->num_t2renames);
if (rc) {
- cFYI(1,("Send error in Rename (by file handle) = %d", rc));
+ cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
}
cifs_buf_release(pSMB);
@@ -1962,9 +2051,9 @@ int CIFSSMBRenameOpenFile(const int xid,struct cifsTconInfo *pTcon,
}
int
-CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char * fromName,
- const __u16 target_tid, const char *toName, const int flags,
- const struct nls_table *nls_codepage, int remap)
+CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
+ const __u16 target_tid, const char *toName, const int flags,
+ const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
COPY_REQ *pSMB = NULL;
@@ -1986,7 +2075,7 @@ copyRetry:
pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
+ name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
fromName, PATH_MAX, nls_codepage,
remap);
name_len++; /* trailing null */
@@ -1994,11 +2083,12 @@ copyRetry:
pSMB->OldFileName[name_len] = 0x04; /* pad */
/* protocol requires ASCII signature byte on Unicode string */
pSMB->OldFileName[name_len + 1] = 0x00;
- name_len2 = cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
+ name_len2 =
+ cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
@@ -2058,7 +2148,7 @@ createSymLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fromName, name_len);
@@ -2070,7 +2160,7 @@ createSymLinkRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
@@ -2081,7 +2171,7 @@ createSymLinkRetry:
, nls_codepage);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len_target = strnlen(toName, PATH_MAX);
name_len_target++; /* trailing null */
strncpy(data_offset, toName, name_len_target);
@@ -2108,9 +2198,7 @@ createSymLinkRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_symlinks);
if (rc) {
- cFYI(1,
- ("Send error in SetPathInfo (create symlink) = %d",
- rc));
+ cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
}
if (pSMB)
@@ -2149,7 +2237,7 @@ createHardLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(toName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, toName, name_len);
@@ -2161,7 +2249,7 @@ createHardLinkRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
@@ -2171,7 +2259,7 @@ createHardLinkRetry:
nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len_target = strnlen(fromName, PATH_MAX);
name_len_target++; /* trailing null */
strncpy(data_offset, fromName, name_len_target);
@@ -2243,13 +2331,13 @@ winCreateHardLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
pSMB->OldFileName[name_len] = 0; /* pad */
- pSMB->OldFileName[name_len + 1] = 0x04;
+ pSMB->OldFileName[name_len + 1] = 0x04;
name_len2 =
- cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
+ cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
@@ -2302,12 +2390,11 @@ querySymLinkRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifs_strtoUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX
- /* find define for this maxpathcomponent */
- , nls_codepage);
+ cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
+ PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
@@ -2324,7 +2411,7 @@ querySymLinkRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -2355,16 +2442,16 @@ querySymLinkRetry:
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = UniStrnlen((wchar_t *) ((char *)
- &pSMBr->hdr.Protocol +data_offset),
- min_t(const int, buflen,count) / 2);
+ &pSMBr->hdr.Protocol + data_offset),
+ min_t(const int, buflen, count) / 2);
/* BB FIXME investigate remapping reserved chars here */
cifs_strfromUCS_le(symlinkinfo,
- (__le16 *) ((char *)&pSMBr->hdr.Protocol +
- data_offset),
+ (__le16 *) ((char *)&pSMBr->hdr.Protocol
+ + data_offset),
name_len, nls_codepage);
} else {
strncpy(symlinkinfo,
- (char *) &pSMBr->hdr.Protocol +
+ (char *) &pSMBr->hdr.Protocol +
data_offset,
min_t(const int, buflen, count));
}
@@ -2385,14 +2472,14 @@ querySymLinkRetry:
Setup words themselves and ByteCount
MaxSetupCount (size of returned setup area) and
MaxParameterCount (returned parms size) must be set by caller */
-static int
+static int
smb_init_ntransact(const __u16 sub_command, const int setup_count,
const int parm_len, struct cifsTconInfo *tcon,
- void ** ret_buf)
+ void **ret_buf)
{
int rc;
__u32 temp_offset;
- struct smb_com_ntransact_req * pSMB;
+ struct smb_com_ntransact_req *pSMB;
rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
(void **)&pSMB);
@@ -2416,47 +2503,47 @@ smb_init_ntransact(const __u16 sub_command, const int setup_count,
}
static int
-validate_ntransact(char * buf, char ** ppparm, char ** ppdata,
- int * pdatalen, int * pparmlen)
+validate_ntransact(char *buf, char **ppparm, char **ppdata,
+ int *pdatalen, int *pparmlen)
{
- char * end_of_smb;
+ char *end_of_smb;
__u32 data_count, data_offset, parm_count, parm_offset;
- struct smb_com_ntransact_rsp * pSMBr;
+ struct smb_com_ntransact_rsp *pSMBr;
- if(buf == NULL)
+ if (buf == NULL)
return -EINVAL;
pSMBr = (struct smb_com_ntransact_rsp *)buf;
/* ByteCount was converted from little endian in SendReceive */
- end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
+ end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
(char *)&pSMBr->ByteCount;
-
data_offset = le32_to_cpu(pSMBr->DataOffset);
data_count = le32_to_cpu(pSMBr->DataCount);
- parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
+ parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
parm_count = le32_to_cpu(pSMBr->ParameterCount);
*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
/* should we also check that parm and data areas do not overlap? */
- if(*ppparm > end_of_smb) {
- cFYI(1,("parms start after end of smb"));
+ if (*ppparm > end_of_smb) {
+ cFYI(1, ("parms start after end of smb"));
return -EINVAL;
- } else if(parm_count + *ppparm > end_of_smb) {
- cFYI(1,("parm end after end of smb"));
+ } else if (parm_count + *ppparm > end_of_smb) {
+ cFYI(1, ("parm end after end of smb"));
return -EINVAL;
- } else if(*ppdata > end_of_smb) {
- cFYI(1,("data starts after end of smb"));
+ } else if (*ppdata > end_of_smb) {
+ cFYI(1, ("data starts after end of smb"));
return -EINVAL;
- } else if(data_count + *ppdata > end_of_smb) {
+ } else if (data_count + *ppdata > end_of_smb) {
cFYI(1,("data %p + count %d (%p) ends after end of smb %p start %p",
- *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr)); /* BB FIXME */
+ *ppdata, data_count, (data_count + *ppdata),
+ end_of_smb, pSMBr));
return -EINVAL;
- } else if(parm_count + data_count > pSMBr->ByteCount) {
- cFYI(1,("parm count and data count larger than SMB"));
+ } else if (parm_count + data_count > pSMBr->ByteCount) {
+ cFYI(1, ("parm count and data count larger than SMB"));
return -EINVAL;
}
return 0;
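validate_ntransact, cleaned up above, rejects replies whose parameter or data region would start or end past the received SMB. A small sketch of that style of check, using only lengths and offsets rather than the real CIFS structures (names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* 1 if [offset, offset+count) lies entirely inside a buffer of buf_len bytes */
static int region_ok(size_t buf_len, uint32_t offset, uint32_t count)
{
        if (offset > buf_len)
                return 0;                 /* region starts past the end */
        if (count > buf_len - offset)
                return 0;                 /* region runs past the end */
        return 1;
}

int main(void)
{
        size_t reply_len = 128;
        printf("%d %d\n",
               region_ok(reply_len, 100, 20),   /* 1: fits            */
               region_ok(reply_len, 100, 40));  /* 0: would overrun   */
        return 0;
}

Comparing count against buf_len - offset (after the first check) avoids the wrap-around that a naive ptr + count > end comparison can hit with untrusted 32-bit counts.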
@@ -2465,14 +2552,14 @@ validate_ntransact(char * buf, char ** ppparm, char ** ppdata,
int
CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char *symlinkinfo, const int buflen,__u16 fid,
+ char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage)
{
int rc = 0;
int bytes_returned;
int name_len;
- struct smb_com_transaction_ioctl_req * pSMB;
- struct smb_com_transaction_ioctl_rsp * pSMBr;
+ struct smb_com_transaction_ioctl_req *pSMB;
+ struct smb_com_transaction_ioctl_rsp *pSMBr;
cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName));
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
@@ -2511,47 +2598,53 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
/* BB also check enough total bytes returned */
rc = -EIO; /* bad smb */
else {
- if(data_count && (data_count < 2048)) {
- char * end_of_smb = 2 /* sizeof byte count */ +
+ if (data_count && (data_count < 2048)) {
+ char *end_of_smb = 2 /* sizeof byte count */ +
pSMBr->ByteCount +
(char *)&pSMBr->ByteCount;
- struct reparse_data * reparse_buf = (struct reparse_data *)
- ((char *)&pSMBr->hdr.Protocol + data_offset);
- if((char*)reparse_buf >= end_of_smb) {
+ struct reparse_data *reparse_buf =
+ (struct reparse_data *)
+ ((char *)&pSMBr->hdr.Protocol
+ + data_offset);
+ if ((char *)reparse_buf >= end_of_smb) {
rc = -EIO;
goto qreparse_out;
}
- if((reparse_buf->LinkNamesBuf +
+ if ((reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset +
reparse_buf->TargetNameLen) >
end_of_smb) {
- cFYI(1,("reparse buf extended beyond SMB"));
+ cFYI(1,("reparse buf goes beyond SMB"));
rc = -EIO;
goto qreparse_out;
}
-
+
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = UniStrnlen((wchar_t *)
- (reparse_buf->LinkNamesBuf +
- reparse_buf->TargetNameOffset),
- min(buflen/2, reparse_buf->TargetNameLen / 2));
+ (reparse_buf->LinkNamesBuf +
+ reparse_buf->TargetNameOffset),
+ min(buflen/2,
+ reparse_buf->TargetNameLen / 2));
cifs_strfromUCS_le(symlinkinfo,
- (__le16 *) (reparse_buf->LinkNamesBuf +
+ (__le16 *) (reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset),
name_len, nls_codepage);
} else { /* ASCII names */
- strncpy(symlinkinfo,reparse_buf->LinkNamesBuf +
- reparse_buf->TargetNameOffset,
- min_t(const int, buflen, reparse_buf->TargetNameLen));
+ strncpy(symlinkinfo,
+ reparse_buf->LinkNamesBuf +
+ reparse_buf->TargetNameOffset,
+ min_t(const int, buflen,
+ reparse_buf->TargetNameLen));
}
} else {
rc = -EIO;
- cFYI(1,("Invalid return data count on get reparse info ioctl"));
+ cFYI(1, ("Invalid return data count on "
+ "get reparse info ioctl"));
}
symlinkinfo[buflen] = 0; /* just in case so the caller
does not go off the end of the buffer */
- cFYI(1,("readlink result - %s",symlinkinfo));
+ cFYI(1, ("readlink result - %s", symlinkinfo));
}
}
qreparse_out:
@@ -2566,7 +2659,8 @@ qreparse_out:
#ifdef CONFIG_CIFS_POSIX
/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
-static void cifs_convert_ace(posix_acl_xattr_entry * ace, struct cifs_posix_ace * cifs_ace)
+static void cifs_convert_ace(posix_acl_xattr_entry *ace,
+ struct cifs_posix_ace *cifs_ace)
{
/* u8 cifs fields do not need le conversion */
ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
@@ -2578,30 +2672,31 @@ static void cifs_convert_ace(posix_acl_xattr_entry * ace, struct cifs_posix_ace
}
/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
-static int cifs_copy_posix_acl(char * trgt,char * src, const int buflen,
- const int acl_type,const int size_of_data_area)
+static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
+ const int acl_type, const int size_of_data_area)
{
int size = 0;
int i;
__u16 count;
- struct cifs_posix_ace * pACE;
- struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)src;
- posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)trgt;
+ struct cifs_posix_ace *pACE;
+ struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
+ posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;
if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
return -EOPNOTSUPP;
- if(acl_type & ACL_TYPE_ACCESS) {
+ if (acl_type & ACL_TYPE_ACCESS) {
count = le16_to_cpu(cifs_acl->access_entry_count);
pACE = &cifs_acl->ace_array[0];
size = sizeof(struct cifs_posix_acl);
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
- if(size_of_data_area < size) {
- cFYI(1,("bad CIFS POSIX ACL size %d vs. %d",size_of_data_area,size));
+ if (size_of_data_area < size) {
+ cFYI(1, ("bad CIFS POSIX ACL size %d vs. %d",
+ size_of_data_area, size));
return -EINVAL;
}
- } else if(acl_type & ACL_TYPE_DEFAULT) {
+ } else if (acl_type & ACL_TYPE_DEFAULT) {
count = le16_to_cpu(cifs_acl->access_entry_count);
size = sizeof(struct cifs_posix_acl);
size += sizeof(struct cifs_posix_ace) * count;
@@ -2610,7 +2705,7 @@ static int cifs_copy_posix_acl(char * trgt,char * src, const int buflen,
count = le16_to_cpu(cifs_acl->default_entry_count);
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
- if(size_of_data_area < size)
+ if (size_of_data_area < size)
return -EINVAL;
} else {
/* illegal type */
@@ -2618,76 +2713,77 @@ static int cifs_copy_posix_acl(char * trgt,char * src, const int buflen,
}
size = posix_acl_xattr_size(count);
- if((buflen == 0) || (local_acl == NULL)) {
- /* used to query ACL EA size */
- } else if(size > buflen) {
+ if ((buflen == 0) || (local_acl == NULL)) {
+ /* used to query ACL EA size */
+ } else if (size > buflen) {
return -ERANGE;
} else /* buffer big enough */ {
local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
- for(i = 0;i < count ;i++) {
- cifs_convert_ace(&local_acl->a_entries[i],pACE);
- pACE ++;
+ for (i = 0; i < count ; i++) {
+ cifs_convert_ace(&local_acl->a_entries[i], pACE);
+ pACE++;
}
}
return size;
}
-static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace * cifs_ace,
- const posix_acl_xattr_entry * local_ace)
+static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
+ const posix_acl_xattr_entry *local_ace)
{
__u16 rc = 0; /* 0 = ACL converted ok */
cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag);
/* BB is there a better way to handle the large uid? */
- if(local_ace->e_id == cpu_to_le32(-1)) {
+ if (local_ace->e_id == cpu_to_le32(-1)) {
/* Probably no need to le convert -1 on any arch but can not hurt */
cifs_ace->cifs_uid = cpu_to_le64(-1);
- } else
+ } else
cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
- /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
+ /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/
return rc;
}
/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
-static __u16 ACL_to_cifs_posix(char * parm_data,const char * pACL,const int buflen,
- const int acl_type)
+static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+ const int buflen, const int acl_type)
{
__u16 rc = 0;
- struct cifs_posix_acl * cifs_acl = (struct cifs_posix_acl *)parm_data;
- posix_acl_xattr_header * local_acl = (posix_acl_xattr_header *)pACL;
+ struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
+ posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
int count;
int i;
- if((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
+ if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
return 0;
count = posix_acl_xattr_count((size_t)buflen);
- cFYI(1,("setting acl with %d entries from buf of length %d and version of %d",
+ cFYI(1, ("setting acl with %d entries from buf of length %d and "
+ "version of %d",
count, buflen, le32_to_cpu(local_acl->a_version)));
- if(le32_to_cpu(local_acl->a_version) != 2) {
- cFYI(1,("unknown POSIX ACL version %d",
+ if (le32_to_cpu(local_acl->a_version) != 2) {
+ cFYI(1, ("unknown POSIX ACL version %d",
le32_to_cpu(local_acl->a_version)));
return 0;
}
cifs_acl->version = cpu_to_le16(1);
- if(acl_type == ACL_TYPE_ACCESS)
+ if (acl_type == ACL_TYPE_ACCESS)
cifs_acl->access_entry_count = cpu_to_le16(count);
- else if(acl_type == ACL_TYPE_DEFAULT)
+ else if (acl_type == ACL_TYPE_DEFAULT)
cifs_acl->default_entry_count = cpu_to_le16(count);
else {
- cFYI(1,("unknown ACL type %d",acl_type));
+ cFYI(1, ("unknown ACL type %d", acl_type));
return 0;
}
- for(i=0;i<count;i++) {
+ for (i = 0; i < count; i++) {
rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
&local_acl->a_entries[i]);
- if(rc != 0) {
+ if (rc != 0) {
/* ACE not converted */
break;
}
}
- if(rc == 0) {
+ if (rc == 0) {
rc = (__u16)(count * sizeof(struct cifs_posix_ace));
rc += sizeof(struct cifs_posix_acl);
/* BB add check to make sure ACL does not overflow SMB */
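Both cifs_copy_posix_acl and ACL_to_cifs_posix above reduce to the same size bookkeeping: the wire ACL is a fixed header plus count fixed-size entries, and that total must fit in the SMB data area. A sketch of the arithmetic, with wire_acl_hdr and wire_acl_ace as simplified stand-ins for the cifs_posix_acl structures:

#include <stdio.h>
#include <stddef.h>

struct wire_acl_hdr { unsigned short version, access_count, default_count; };
struct wire_acl_ace { unsigned short perm, tag; unsigned long long id; };

/* bytes needed on the wire for 'count' ACL entries */
static size_t wire_acl_size(size_t count)
{
        return sizeof(struct wire_acl_hdr) +
               count * sizeof(struct wire_acl_ace);
}

/* 0 if the claimed entry count would run past the SMB data area */
static int acl_fits(size_t count, size_t data_area_len)
{
        return wire_acl_size(count) <= data_area_len;
}

int main(void)
{
        printf("size(3)=%zu fits in 64? %d\n",
               wire_acl_size(3), acl_fits(3, 64));
        return 0;
}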
@@ -2697,9 +2793,9 @@ static __u16 ACL_to_cifs_posix(char * parm_data,const char * pACL,const int bufl
int
CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- char *acl_inf, const int buflen, const int acl_type,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName,
+ char *acl_inf, const int buflen, const int acl_type,
+ const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_POSIX_ACL */
TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -2708,7 +2804,7 @@ CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
__u16 params, byte_count;
-
+
cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName));
queryAclRetry:
@@ -2716,16 +2812,16 @@ queryAclRetry:
(void **) &pSMBr);
if (rc)
return rc;
-
+
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
@@ -2734,7 +2830,7 @@ queryAclRetry:
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- /* BB find exact max data count below from sess structure BB */
+ /* BB find exact max data count below from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
@@ -2742,7 +2838,8 @@ queryAclRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(
- offsetof(struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ offsetof(struct smb_com_transaction2_qpi_req,
+ InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -2763,7 +2860,7 @@ queryAclRetry:
cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
} else {
/* decode response */
-
+
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < 2))
/* BB also check enough total bytes returned */
@@ -2773,7 +2870,7 @@ queryAclRetry:
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
rc = cifs_copy_posix_acl(acl_inf,
(char *)&pSMBr->hdr.Protocol+data_offset,
- buflen,acl_type,count);
+ buflen, acl_type, count);
}
}
cifs_buf_release(pSMB);
@@ -2784,10 +2881,10 @@ queryAclRetry:
int
CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *fileName,
- const char *local_acl, const int buflen,
- const int acl_type,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *fileName,
+ const char *local_acl, const int buflen,
+ const int acl_type,
+ const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
@@ -2800,16 +2897,16 @@ CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName));
setAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ (void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
@@ -2823,15 +2920,15 @@ setAclRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
/* convert to on the wire format for POSIX ACL */
- data_count = ACL_to_cifs_posix(parm_data,local_acl,buflen,acl_type);
+ data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);
- if(data_count == 0) {
+ if (data_count == 0) {
rc = -EOPNOTSUPP;
goto setACLerrorExit;
}
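The param_offset lines being re-indented in these hunks all follow the same recipe: locate the field where the TRANSACTION2 parameter block begins with offsetof, subtract the 4-byte length prefix that the on-the-wire offsets do not count, and place the data block immediately after the parameters. A toy sketch of that layout math, with fake_t2_req as a simplified stand-in for smb_com_transaction2_spi_req:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fake_t2_req {
        uint32_t smb_buf_length;      /* length prefix, not counted in wire offsets */
        uint8_t  protocol[4];
        uint16_t misc[10];
        uint16_t InformationLevel;    /* parameter block starts here */
        uint8_t  payload[256];
};

int main(void)
{
        size_t params       = 6;      /* hypothetical parameter byte count */
        size_t param_offset = offsetof(struct fake_t2_req, InformationLevel) - 4;
        size_t data_offset  = param_offset + params;   /* data follows the parms */

        printf("param_offset=%zu data_offset=%zu\n", param_offset, data_offset);
        return 0;
}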
@@ -2849,7 +2946,7 @@ setAclRetry:
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Set POSIX ACL returned %d", rc));
}
@@ -2864,86 +2961,85 @@ setACLerrorExit:
/* BB fix tabs in this function FIXME BB */
int
CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
- const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
+ const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
{
- int rc = 0;
- struct smb_t2_qfi_req *pSMB = NULL;
- struct smb_t2_qfi_rsp *pSMBr = NULL;
- int bytes_returned;
- __u16 params, byte_count;
+ int rc = 0;
+ struct smb_t2_qfi_req *pSMB = NULL;
+ struct smb_t2_qfi_rsp *pSMBr = NULL;
+ int bytes_returned;
+ __u16 params, byte_count;
- cFYI(1,("In GetExtAttr"));
- if(tcon == NULL)
- return -ENODEV;
+ cFYI(1, ("In GetExtAttr"));
+ if (tcon == NULL)
+ return -ENODEV;
GetExtAttrRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- params = 2 /* level */ +2 /* fid */;
- pSMB->t2.TotalDataCount = 0;
- pSMB->t2.MaxParameterCount = cpu_to_le16(4);
- /* BB find exact max data count below from sess structure BB */
- pSMB->t2.MaxDataCount = cpu_to_le16(4000);
- pSMB->t2.MaxSetupCount = 0;
- pSMB->t2.Reserved = 0;
- pSMB->t2.Flags = 0;
- pSMB->t2.Timeout = 0;
- pSMB->t2.Reserved2 = 0;
- pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
- Fid) - 4);
- pSMB->t2.DataCount = 0;
- pSMB->t2.DataOffset = 0;
- pSMB->t2.SetupCount = 1;
- pSMB->t2.Reserved3 = 0;
- pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
- byte_count = params + 1 /* pad */ ;
- pSMB->t2.TotalParameterCount = cpu_to_le16(params);
- pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
- pSMB->Pad = 0;
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ params = 2 /* level */ +2 /* fid */;
+ pSMB->t2.TotalDataCount = 0;
+ pSMB->t2.MaxParameterCount = cpu_to_le16(4);
+ /* BB find exact max data count below from sess structure BB */
+ pSMB->t2.MaxDataCount = cpu_to_le16(4000);
+ pSMB->t2.MaxSetupCount = 0;
+ pSMB->t2.Reserved = 0;
+ pSMB->t2.Flags = 0;
+ pSMB->t2.Timeout = 0;
+ pSMB->t2.Reserved2 = 0;
+ pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
+ Fid) - 4);
+ pSMB->t2.DataCount = 0;
+ pSMB->t2.DataOffset = 0;
+ pSMB->t2.SetupCount = 1;
+ pSMB->t2.Reserved3 = 0;
+ pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+ byte_count = params + 1 /* pad */ ;
+ pSMB->t2.TotalParameterCount = cpu_to_le16(params);
+ pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
+ pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
+ pSMB->Pad = 0;
pSMB->Fid = netfid;
- pSMB->hdr.smb_buf_length += byte_count;
- pSMB->t2.ByteCount = cpu_to_le16(byte_count);
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("error %d in GetExtAttr", rc));
- } else {
- /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if (rc || (pSMBr->ByteCount < 2))
- /* BB also check enough total bytes returned */
- /* If rc should we check for EOPNOSUPP and
- disable the srvino flag? or in caller? */
- rc = -EIO; /* bad smb */
- else {
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- __u16 count = le16_to_cpu(pSMBr->t2.DataCount);
- struct file_chattr_info * pfinfo;
- /* BB Do we need a cast or hash here ? */
- if(count != 16) {
- cFYI(1, ("Illegal size ret in GetExtAttr"));
- rc = -EIO;
- goto GetExtAttrOut;
- }
- pfinfo = (struct file_chattr_info *)
- (data_offset + (char *) &pSMBr->hdr.Protocol);
- *pExtAttrBits = le64_to_cpu(pfinfo->mode);
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->t2.ByteCount = cpu_to_le16(byte_count);
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1, ("error %d in GetExtAttr", rc));
+ } else {
+ /* decode response */
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc || (pSMBr->ByteCount < 2))
+ /* BB also check enough total bytes returned */
+ /* If rc should we check for EOPNOSUPP and
+ disable the srvino flag? or in caller? */
+ rc = -EIO; /* bad smb */
+ else {
+ __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+ __u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+ struct file_chattr_info *pfinfo;
+ /* BB Do we need a cast or hash here ? */
+ if (count != 16) {
+ cFYI(1, ("Illegal size ret in GetExtAttr"));
+ rc = -EIO;
+ goto GetExtAttrOut;
+ }
+ pfinfo = (struct file_chattr_info *)
+ (data_offset + (char *) &pSMBr->hdr.Protocol);
+ *pExtAttrBits = le64_to_cpu(pfinfo->mode);
*pMask = le64_to_cpu(pfinfo->mask);
- }
- }
+ }
+ }
GetExtAttrOut:
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto GetExtAttrRetry;
- return rc;
+ cifs_buf_release(pSMB);
+ if (rc == -EAGAIN)
+ goto GetExtAttrRetry;
+ return rc;
}
-
#endif /* CONFIG_POSIX */
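The re-indented CIFSGetExtAttr body above ends by insisting the data area is exactly 16 bytes and then reading two little-endian 64-bit words (mode and mask). A portable sketch of that decode step, with get_le64 standing in for le64_to_cpu on a plain host build:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_le64(const unsigned char *p)
{
        uint64_t v = 0;
        for (int i = 7; i >= 0; i--)
                v = (v << 8) | p[i];      /* p[0] is least significant */
        return v;
}

static int decode_ext_attr(const unsigned char *data, size_t count,
                           uint64_t *bits, uint64_t *mask)
{
        if (count != 16)
                return -1;                /* "Illegal size ret in GetExtAttr" */
        *bits = get_le64(data);
        *mask = get_le64(data + 8);
        return 0;
}

int main(void)
{
        unsigned char raw[16] = { 0x01 };  /* mode = 1, mask = 0 */
        uint64_t bits, mask;
        if (decode_ext_attr(raw, sizeof(raw), &bits, &mask) == 0)
                printf("bits=%llu mask=%llu\n",
                       (unsigned long long)bits, (unsigned long long)mask);
        return 0;
}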
@@ -2955,7 +3051,7 @@ static const struct cifs_sid sid_user =
{1, 2 , {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
/* Convert CIFS ACL to POSIX form */
-static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
+static int parse_sec_desc(struct cifs_sid *psec_desc, int acl_len)
{
return 0;
}
@@ -2963,7 +3059,7 @@ static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
- /* BB fix up return info */ char *acl_inf, const int buflen,
+ /* BB fix up return info */ char *acl_inf, const int buflen,
const int acl_type /* ACCESS/DEFAULT not sure implication */)
{
int rc = 0;
@@ -2973,7 +3069,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
cFYI(1, ("GetCifsACL"));
- rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
+ rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
8 /* parm len */, tcon, (void **) &pSMB);
if (rc)
return rc;
@@ -2994,23 +3090,23 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
if (rc) {
cFYI(1, ("Send error in QuerySecDesc = %d", rc));
} else { /* decode response */
- struct cifs_sid * psec_desc;
+ struct cifs_sid *psec_desc;
__le32 * parm;
int parm_len;
int data_len;
int acl_len;
- struct smb_com_ntransact_rsp * pSMBr;
+ struct smb_com_ntransact_rsp *pSMBr;
/* validate_nttransact */
- rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
+ rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
(char **)&psec_desc,
&parm_len, &data_len);
-
- if(rc)
+ if (rc)
goto qsec_out;
pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
- cERROR(1,("smb %p parm %p data %p",pSMBr,parm,psec_desc)); /* BB removeme BB */
+ cERROR(1, ("smb %p parm %p data %p",
+ pSMBr, parm, psec_desc)); /* BB removeme BB */
if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
rc = -EIO; /* bad smb */
@@ -3020,14 +3116,14 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
/* BB check that data area is minimum length and as big as acl_len */
acl_len = le32_to_cpu(*(__le32 *)parm);
- /* BB check if(acl_len > bufsize) */
+ /* BB check if (acl_len > bufsize) */
parse_sec_desc(psec_desc, acl_len);
}
qsec_out:
- if(buf_type == CIFS_SMALL_BUFFER)
+ if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
- else if(buf_type == CIFS_LARGE_BUFFER)
+ else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
return rc;
@@ -3036,9 +3132,9 @@ qsec_out:
/* Legacy Query Path Information call for lookup to old servers such
as Win9x/WinME */
int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO * pFinfo,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName,
+ FILE_ALL_INFO *pFinfo,
+ const struct nls_table *nls_codepage, int remap)
{
QUERY_INFORMATION_REQ * pSMB;
QUERY_INFORMATION_RSP * pSMBr;
@@ -3046,31 +3142,31 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
int bytes_returned;
int name_len;
- cFYI(1, ("In SMBQPath path %s", searchName));
+ cFYI(1, ("In SMBQPath path %s", searchName));
QInfRetry:
rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ (void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX, nls_codepage, remap);
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else {
+ } else {
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
pSMB->BufferFormat = 0x04;
- name_len++; /* account for buffer type byte */
+ name_len++; /* account for buffer type byte */
pSMB->hdr.smb_buf_length += (__u16) name_len;
pSMB->ByteCount = cpu_to_le16(name_len);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in QueryInfo = %d", rc));
} else if (pFinfo) { /* decode response */
@@ -3127,17 +3223,17 @@ QPathInfoRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+ params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
@@ -3147,7 +3243,7 @@ QPathInfoRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -3156,7 +3252,7 @@ QPathInfoRetry:
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
- if(legacy)
+ if (legacy)
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
else
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
@@ -3173,16 +3269,18 @@ QPathInfoRetry:
if (rc) /* BB add auto retry on EOPNOTSUPP? */
rc = -EIO;
- else if (!legacy && (pSMBr->ByteCount < 40))
+ else if (!legacy && (pSMBr->ByteCount < 40))
rc = -EIO; /* bad smb */
- else if(legacy && (pSMBr->ByteCount < 24))
- rc = -EIO; /* 24 or 26 expected but we do not read last field */
- else if (pFindData){
+ else if (legacy && (pSMBr->ByteCount < 24))
+ rc = -EIO; /* 24 or 26 expected but we do not read
+ last field */
+ else if (pFindData) {
int size;
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- if(legacy) /* we do not read the last field, EAsize, fortunately
- since it varies by subdialect and on Set vs. Get, is
- two bytes or 4 bytes depending but we don't care here */
+ if (legacy) /* we do not read the last field, EAsize,
+ fortunately since it varies by subdialect
+ and on Set vs. Get, is two bytes or 4
+ bytes depending but we don't care here */
size = sizeof(FILE_INFO_STANDARD);
else
size = sizeof(FILE_ALL_INFO);
@@ -3226,24 +3324,24 @@ UnixQPathInfoRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+ params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
+ pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -3303,12 +3401,11 @@ findUniqueRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX
- /* find define for this maxpathcomponent */
- , nls_codepage);
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
@@ -3324,7 +3421,7 @@ findUniqueRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(
- offsetof(struct smb_com_transaction2_ffirst_req,InformationLevel) - 4);
+ offsetof(struct smb_com_transaction2_ffirst_req, InformationLevel)-4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1; /* one byte, no need to le convert */
@@ -3364,10 +3461,10 @@ findUniqueRetry:
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
- const char *searchName,
+ const char *searchName,
const struct nls_table *nls_codepage,
- __u16 * pnetfid,
- struct cifs_search_info * psrch_inf, int remap, const char dirsep)
+ __u16 *pnetfid,
+ struct cifs_search_info *psrch_inf, int remap, const char dirsep)
{
/* level 257 SMB_ */
TRANSACTION2_FFIRST_REQ *pSMB = NULL;
@@ -3378,7 +3475,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
int name_len;
__u16 params, byte_count;
- cFYI(1, ("In FindFirst for %s",searchName));
+ cFYI(1, ("In FindFirst for %s", searchName));
findFirstRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3388,7 +3485,7 @@ findFirstRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName,searchName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
/* We can not add the asterik earlier in case
it got remapped to 0xF03A as if it were part of the
@@ -3405,7 +3502,7 @@ findFirstRetry:
} else { /* BB add check for overrun of SMB buf BB */
name_len = strnlen(searchName, PATH_MAX);
/* BB fix here and in unicode clause above ie
- if(name_len > buffersize-header)
+ if (name_len > buffersize-header)
free buffer exit; BB */
strncpy(pSMB->FileName, searchName, name_len);
pSMB->FileName[name_len] = dirsep;
@@ -3438,8 +3535,8 @@ findFirstRetry:
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
- pSMB->SearchCount= cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
- pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
+ pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+ pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
CIFS_SEARCH_RETURN_RESUME);
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
@@ -3466,7 +3563,7 @@ findFirstRetry:
} else { /* decode response */
/* BB remember to free buffer if error BB */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if(rc == 0) {
+ if (rc == 0) {
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
psrch_inf->unicode = TRUE;
else
@@ -3474,18 +3571,19 @@ findFirstRetry:
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
psrch_inf->smallBuf = 0;
- psrch_inf->srch_entries_start =
- (char *) &pSMBr->hdr.Protocol +
+ psrch_inf->srch_entries_start =
+ (char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.ParameterOffset));
- if(parms->EndofSearch)
+ if (parms->EndofSearch)
psrch_inf->endOfSearch = TRUE;
else
psrch_inf->endOfSearch = FALSE;
- psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount);
+ psrch_inf->entries_in_buffer =
+ le16_to_cpu(parms->SearchCount);
psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
psrch_inf->entries_in_buffer;
*pnetfid = parms->SearchHandle;
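The index_of_last_entry lines touched here and in the FindNext hunk below follow simple bookkeeping: the first buffer's count is offset by two for the skipped "." and ".." entries, and each later buffer's SearchCount is accumulated on top. A tiny sketch with made-up counts:

#include <stdio.h>

int main(void)
{
        unsigned first_count = 50, next_count = 50;   /* hypothetical SearchCounts */

        unsigned index_of_last = 2 + first_count;     /* after FindFirst: skip . and .. */
        index_of_last += next_count;                  /* after one FindNext */

        printf("last entry index = %u\n", index_of_last);
        return 0;
}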
@@ -3498,7 +3596,7 @@ findFirstRetry:
}
int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
- __u16 searchHandle, struct cifs_search_info * psrch_inf)
+ __u16 searchHandle, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -3510,7 +3608,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
cFYI(1, ("In FindNext"));
- if(psrch_inf->endOfSearch == TRUE)
+ if (psrch_inf->endOfSearch == TRUE)
return -ENOENT;
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
@@ -3518,12 +3616,13 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
if (rc)
return rc;
- params = 14; /* includes 2 bytes of null string, converted to LE below */
+ params = 14; /* includes 2 bytes of null string, converted to LE below*/
byte_count = 0;
pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(8);
pSMB->MaxDataCount =
- cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+ cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
+ 0xFFFFFF00);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
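The MaxDataCount expression re-wrapped just above takes the negotiated buffer size, subtracts header room, and rounds down to a multiple of 256 by masking off the low byte. A one-screen sketch with made-up numbers (MAX_CIFS_HDR_SIZE stand-in included):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t max_buf      = 16644;   /* hypothetical negotiated buffer size */
        uint32_t hdr_size     = 500;     /* stand-in for MAX_CIFS_HDR_SIZE      */
        uint32_t max_data_cnt = (max_buf - hdr_size) & 0xFFFFFF00;

        printf("%u -> %u\n", max_buf - hdr_size, max_data_cnt);
        return 0;
}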
@@ -3539,15 +3638,6 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
pSMB->SearchHandle = searchHandle; /* always kept as le */
pSMB->SearchCount =
cpu_to_le16(CIFSMaxBufSize / sizeof (FILE_UNIX_INFO));
- /* test for Unix extensions */
-/* if (tcon->ses->capabilities & CAP_UNIX) {
- pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_UNIX);
- psrch_inf->info_level = SMB_FIND_FILE_UNIX;
- } else {
- pSMB->InformationLevel =
- cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
- psrch_inf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
- } */
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
pSMB->ResumeKey = psrch_inf->resume_key;
pSMB->SearchFlags =
@@ -3555,7 +3645,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
name_len = psrch_inf->resume_name_len;
params += name_len;
- if(name_len < PATH_MAX) {
+ if (name_len < PATH_MAX) {
memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
byte_count += name_len;
/* 14 byte parm len above enough for 2 byte null terminator */
@@ -3570,20 +3660,20 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
-
+
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_fnext);
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = TRUE;
- rc = 0; /* search probably was closed at end of search above */
+ rc = 0; /* search probably was closed at end of search*/
} else
cFYI(1, ("FindNext returned = %d", rc));
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-
- if(rc == 0) {
+
+ if (rc == 0) {
/* BB fixme add lock for file (srch_info) struct here */
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
psrch_inf->unicode = TRUE;
@@ -3594,7 +3684,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
parms = (T2_FNEXT_RSP_PARMS *)response_data;
response_data = (char *)&pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
- if(psrch_inf->smallBuf)
+ if (psrch_inf->smallBuf)
cifs_small_buf_release(
psrch_inf->ntwrk_buf_start);
else
@@ -3602,15 +3692,16 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
psrch_inf->smallBuf = 0;
- if(parms->EndofSearch)
+ if (parms->EndofSearch)
psrch_inf->endOfSearch = TRUE;
else
psrch_inf->endOfSearch = FALSE;
-
- psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount);
+ psrch_inf->entries_in_buffer =
+ le16_to_cpu(parms->SearchCount);
psrch_inf->index_of_last_entry +=
psrch_inf->entries_in_buffer;
-/* cFYI(1,("fnxt2 entries in buf %d index_of_last %d",psrch_inf->entries_in_buffer,psrch_inf->index_of_last_entry)); */
+/* cFYI(1,("fnxt2 entries in buf %d index_of_last %d",
+ psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry)); */
/* BB fixme add unlock here */
}
@@ -3625,12 +3716,12 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
FNext2_err_exit:
if (rc != 0)
cifs_buf_release(pSMB);
-
return rc;
}
int
-CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle)
+CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
+ const __u16 searchHandle)
{
int rc = 0;
FINDCLOSE_REQ *pSMB = NULL;
@@ -3642,7 +3733,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
/* no sense returning error if session restarted
as file handle has been closed */
- if(rc == -EAGAIN)
+ if (rc == -EAGAIN)
return 0;
if (rc)
return rc;
@@ -3667,9 +3758,9 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
int
CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- __u64 * inode_number,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName,
+ __u64 * inode_number,
+ const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3677,24 +3768,23 @@ CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
int name_len, bytes_returned;
__u16 params, byte_count;
- cFYI(1,("In GetSrvInodeNum for %s",searchName));
- if(tcon == NULL)
- return -ENODEV;
+ cFYI(1, ("In GetSrvInodeNum for %s", searchName));
+ if (tcon == NULL)
+ return -ENODEV;
GetInodeNumberRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ (void **) &pSMBr);
if (rc)
return rc;
-
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX,nls_codepage, remap);
+ PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
@@ -3711,7 +3801,7 @@ GetInodeNumberRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -3737,12 +3827,12 @@ GetInodeNumberRetry:
/* If rc should we check for EOPNOSUPP and
disable the srvino flag? or in caller? */
rc = -EIO; /* bad smb */
- else {
+ else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
- struct file_internal_info * pfinfo;
+ struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
- if(count < 8) {
+ if (count < 8) {
cFYI(1, ("Illegal size ret in QryIntrnlInf"));
rc = -EIO;
goto GetInodeNumOut;
@@ -3769,12 +3859,12 @@ CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
/* TRANS2_GET_DFS_REFERRAL */
TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
- struct dfs_referral_level_3 * referrals = NULL;
+ struct dfs_referral_level_3 *referrals = NULL;
int rc = 0;
int bytes_returned;
int name_len;
unsigned int i;
- char * temp;
+ char *temp;
__u16 params, byte_count;
*number_of_UNC_in_array = 0;
*targetUNCs = NULL;
@@ -3787,8 +3877,8 @@ getDFSRetry:
(void **) &pSMBr);
if (rc)
return rc;
-
- /* server pointer checked in called function,
+
+ /* server pointer checked in called function,
but should never be null here anyway */
pSMB->hdr.Mid = GetNextMid(ses->server);
pSMB->hdr.Tid = ses->ipc_tid;
@@ -3807,19 +3897,19 @@ getDFSRetry:
searchName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->RequestFileName, searchName, name_len);
}
- if(ses->server) {
- if(ses->server->secMode &
+ if (ses->server) {
+ if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
- pSMB->hdr.Uid = ses->Suid;
+ pSMB->hdr.Uid = ses->Suid;
params = 2 /* level */ + name_len /*includes null */ ;
pSMB->TotalDataCount = 0;
@@ -3833,7 +3923,7 @@ getDFSRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
+ struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
@@ -3852,74 +3942,87 @@ getDFSRetry:
/* BB Add logic to parse referrals here */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if (rc || (pSMBr->ByteCount < 17)) /* BB also check enough total bytes returned */
+ /* BB Also check if enough total bytes returned? */
+ if (rc || (pSMBr->ByteCount < 17))
rc = -EIO; /* bad smb */
else {
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+ __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 data_count = le16_to_cpu(pSMBr->t2.DataCount);
cFYI(1,
- ("Decoding GetDFSRefer response. BCC: %d Offset %d",
+ ("Decoding GetDFSRefer response BCC: %d Offset %d",
pSMBr->ByteCount, data_offset));
- referrals =
- (struct dfs_referral_level_3 *)
+ referrals =
+ (struct dfs_referral_level_3 *)
(8 /* sizeof start of data block */ +
data_offset +
- (char *) &pSMBr->hdr.Protocol);
- cFYI(1,("num_referrals: %d dfs flags: 0x%x ... \nfor referral one refer size: 0x%x srv type: 0x%x refer flags: 0x%x ttl: 0x%x",
- le16_to_cpu(pSMBr->NumberOfReferrals),le16_to_cpu(pSMBr->DFSFlags), le16_to_cpu(referrals->ReferralSize),le16_to_cpu(referrals->ServerType),le16_to_cpu(referrals->ReferralFlags),le16_to_cpu(referrals->TimeToLive)));
+ (char *) &pSMBr->hdr.Protocol);
+ cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n"
+ "for referral one refer size: 0x%x srv "
+ "type: 0x%x refer flags: 0x%x ttl: 0x%x",
+ le16_to_cpu(pSMBr->NumberOfReferrals),
+ le16_to_cpu(pSMBr->DFSFlags),
+ le16_to_cpu(referrals->ReferralSize),
+ le16_to_cpu(referrals->ServerType),
+ le16_to_cpu(referrals->ReferralFlags),
+ le16_to_cpu(referrals->TimeToLive)));
/* BB This field is actually two bytes in from start of
data block so we could do safety check that DataBlock
begins at address of pSMBr->NumberOfReferrals */
- *number_of_UNC_in_array = le16_to_cpu(pSMBr->NumberOfReferrals);
+ *number_of_UNC_in_array =
+ le16_to_cpu(pSMBr->NumberOfReferrals);
/* BB Fix below so can return more than one referral */
- if(*number_of_UNC_in_array > 1)
+ if (*number_of_UNC_in_array > 1)
*number_of_UNC_in_array = 1;
/* get the length of the strings describing refs */
name_len = 0;
- for(i=0;i<*number_of_UNC_in_array;i++) {
+ for (i = 0; i < *number_of_UNC_in_array; i++) {
/* make sure that DfsPathOffset not past end */
- __u16 offset = le16_to_cpu(referrals->DfsPathOffset);
+ __u16 offset =
+ le16_to_cpu(referrals->DfsPathOffset);
if (offset > data_count) {
- /* if invalid referral, stop here and do
+ /* if invalid referral, stop here and do
not try to copy any more */
*number_of_UNC_in_array = i;
break;
- }
+ }
temp = ((char *)referrals) + offset;
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len += UniStrnlen((wchar_t *)temp,data_count);
+ name_len += UniStrnlen((wchar_t *)temp,
+ data_count);
} else {
- name_len += strnlen(temp,data_count);
+ name_len += strnlen(temp, data_count);
}
referrals++;
- /* BB add check that referral pointer does not fall off end PDU */
-
+ /* BB add check that referral pointer does
+ not fall off end PDU */
}
/* BB add check for name_len bigger than bcc */
- *targetUNCs =
- kmalloc(name_len+1+ (*number_of_UNC_in_array),GFP_KERNEL);
- if(*targetUNCs == NULL) {
+ *targetUNCs =
+ kmalloc(name_len+1+(*number_of_UNC_in_array),
+ GFP_KERNEL);
+ if (*targetUNCs == NULL) {
rc = -ENOMEM;
goto GetDFSRefExit;
}
/* copy the ref strings */
- referrals =
- (struct dfs_referral_level_3 *)
- (8 /* sizeof data hdr */ +
- data_offset +
+ referrals = (struct dfs_referral_level_3 *)
+ (8 /* sizeof data hdr */ + data_offset +
(char *) &pSMBr->hdr.Protocol);
- for(i=0;i<*number_of_UNC_in_array;i++) {
- temp = ((char *)referrals) + le16_to_cpu(referrals->DfsPathOffset);
+ for (i = 0; i < *number_of_UNC_in_array; i++) {
+ temp = ((char *)referrals) +
+ le16_to_cpu(referrals->DfsPathOffset);
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
cifs_strfromUCS_le(*targetUNCs,
- (__le16 *) temp, name_len, nls_codepage);
+ (__le16 *) temp,
+ name_len,
+ nls_codepage);
} else {
- strncpy(*targetUNCs,temp,name_len);
+ strncpy(*targetUNCs, temp, name_len);
}
/* BB update target_uncs pointers */
referrals++;
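The referral loop reflowed above walks variable-length records, validates each record's DfsPathOffset against the data area before following it, and totals the string lengths so the target UNC buffer can be sized. A rough sketch of that walk, where fake_referral is only an illustrative layout, not the real dfs_referral_level_3:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_referral {
        uint16_t path_offset;      /* offset of the path string from this record */
        char     blob[30];
};

static size_t total_name_len(const struct fake_referral *refs, size_t nrefs,
                             size_t data_count)
{
        size_t len = 0;
        for (size_t i = 0; i < nrefs; i++) {
                uint16_t off = refs[i].path_offset;
                if (off > data_count)
                        break;                         /* invalid referral: stop copying */
                const char *path = (const char *)&refs[i] + off;
                len += strnlen(path, data_count);
        }
        return len;
}

int main(void)
{
        struct fake_referral r[1] = { { 2, "\\\\srv\\share" } };
        printf("total name bytes = %zu\n", total_name_len(r, 1, sizeof(r)));
        return 0;
}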
@@ -3996,18 +4099,17 @@ oldQFSInfoRetry:
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- cFYI(1,("qfsinf resp BCC: %d Offset %d",
+ cFYI(1, ("qfsinf resp BCC: %d Offset %d",
pSMBr->ByteCount, data_offset));
- response_data =
- (FILE_SYSTEM_ALLOC_INFO *)
+ response_data = (FILE_SYSTEM_ALLOC_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
FSData->f_bsize =
le16_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
FSData->f_blocks =
- le32_to_cpu(response_data->TotalAllocationUnits);
+ le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le32_to_cpu(response_data->FreeAllocationUnits);
cFYI(1,
@@ -4056,7 +4158,7 @@ QFSInfoRetry:
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+ struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -4071,7 +4173,7 @@ QFSInfoRetry:
if (rc) {
cFYI(1, ("Send error in QFSInfo = %d", rc));
} else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || (pSMBr->ByteCount < 24))
rc = -EIO; /* bad smb */
@@ -4136,7 +4238,7 @@ QFSAttributeRetry:
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+ struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -4153,7 +4255,8 @@ QFSAttributeRetry:
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if (rc || (pSMBr->ByteCount < 13)) { /* BB also check enough bytes returned */
+ if (rc || (pSMBr->ByteCount < 13)) {
+ /* BB also check if enough bytes returned */
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
@@ -4204,7 +4307,7 @@ QFSDeviceRetry:
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+ struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
@@ -4274,8 +4377,8 @@ QFSUnixRetry:
byte_count = params + 1 /* pad */ ;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
- smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+ pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+ smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
@@ -4335,7 +4438,8 @@ SETFSUnixRetry:
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
- param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum) - 4;
+ param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
+ - 4;
offset = param_offset + params;
pSMB->MaxParameterCount = cpu_to_le16(4);
@@ -4417,8 +4521,8 @@ QFSPosixRetry:
byte_count = params + 1 /* pad */ ;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
- smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+ pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+ smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
@@ -4447,18 +4551,18 @@ QFSPosixRetry:
le64_to_cpu(response_data->TotalBlocks);
FSData->f_bfree =
le64_to_cpu(response_data->BlocksAvail);
- if(response_data->UserBlocksAvail == cpu_to_le64(-1)) {
+ if (response_data->UserBlocksAvail == cpu_to_le64(-1)) {
FSData->f_bavail = FSData->f_bfree;
} else {
FSData->f_bavail =
- le64_to_cpu(response_data->UserBlocksAvail);
+ le64_to_cpu(response_data->UserBlocksAvail);
}
- if(response_data->TotalFileNodes != cpu_to_le64(-1))
+ if (response_data->TotalFileNodes != cpu_to_le64(-1))
FSData->f_files =
- le64_to_cpu(response_data->TotalFileNodes);
- if(response_data->FreeFileNodes != cpu_to_le64(-1))
+ le64_to_cpu(response_data->TotalFileNodes);
+ if (response_data->FreeFileNodes != cpu_to_le64(-1))
FSData->f_ffree =
- le64_to_cpu(response_data->FreeFileNodes);
+ le64_to_cpu(response_data->FreeFileNodes);
}
}
cifs_buf_release(pSMB);
@@ -4470,15 +4574,15 @@ QFSPosixRetry:
}
-/* We can not use write of zero bytes trick to
- set file size due to need for large file support. Also note that
- this SetPathInfo is preferred to SetFileInfo based method in next
+/* We can not use write of zero bytes trick to
+ set file size due to need for large file support. Also note that
+ this SetPathInfo is preferred to SetFileInfo based method in next
routine which is only needed to work around a sharing violation bug
in Samba which this routine can run into */
int
CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- __u64 size, int SetAllocation,
+ __u64 size, int SetAllocation,
const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
@@ -4517,22 +4621,22 @@ SetEOFRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
- if(SetAllocation) {
- if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
- pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
- else
- pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
- } else /* Set File Size */ {
+ if (SetAllocation) {
+ if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+ pSMB->InformationLevel =
+ cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+ else
+ pSMB->InformationLevel =
+ cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+ } else /* Set File Size */ {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+ cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
else
pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+ cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
}
parm_data =
@@ -4567,8 +4671,8 @@ SetEOFRetry:
}
int
-CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
- __u16 fid, __u32 pid_of_opener, int SetAllocation)
+CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
+ __u16 fid, __u32 pid_of_opener, int SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -4589,7 +4693,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
-
+
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
@@ -4599,7 +4703,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = sizeof(struct file_end_of_file_info);
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4614,25 +4718,25 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
parm_data =
- (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
- offset);
+ (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol)
+ + offset);
pSMB->DataOffset = cpu_to_le16(offset);
parm_data->FileSize = cpu_to_le64(size);
pSMB->Fid = fid;
- if(SetAllocation) {
+ if (SetAllocation) {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
else
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
- } else /* Set File Size */ {
+ } else /* Set File Size */ {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+ cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
else
pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+ cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
}
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
@@ -4648,21 +4752,21 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
if (pSMB)
cifs_small_buf_release(pSMB);
- /* Note: On -EAGAIN error only caller can retry on handle based calls
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
-/* Some legacy servers such as NT4 require that the file times be set on
+/* Some legacy servers such as NT4 require that the file times be set on
an open handle, rather than by pathname - this is awkward due to
potential access conflicts on the open, but it is unavoidable for these
old servers since the only other choice is to go from 100 nanosecond DCE
time and resort to the original setpathinfo level which takes the ancient
DOS time format with 2 second granularity */
int
-CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_INFO * data,
- __u16 fid)
+CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
+ const FILE_BASIC_INFO *data, __u16 fid)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -4684,7 +4788,7 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
use an existing handle (rather than opening one on the fly) */
/* pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));*/
-
+
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
@@ -4694,7 +4798,7 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = sizeof (FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4717,16 +4821,16 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
- memcpy(data_offset,data,sizeof(FILE_BASIC_INFO));
+ memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc));
+ cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
}
cifs_small_buf_release(pSMB);
- /* Note: On -EAGAIN error only caller can retry on handle based calls
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
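
The comment above CIFSSMBSetFileTimes contrasts 100-nanosecond NT/DCE timestamps with the ancient DOS time format. As a rough user-space illustration of why falling back to the old SetPathInfo level loses precision, the sketch below packs a broken-down time into the classic 16-bit DOS date/time words, whose seconds field only holds 2-second steps; it is not CIFS code, just the format the comment refers to.

/* User-space sketch (not kernel code): pack a struct tm into the legacy
 * 16-bit DOS date/time words, showing the 2 second granularity the
 * comment above mentions.  Field layout per the classic FAT/DOS format. */
#include <stdio.h>
#include <time.h>

static void to_dos_time(const struct tm *tm,
                        unsigned short *dos_date, unsigned short *dos_time)
{
    *dos_date = (unsigned short)(((tm->tm_year - 80) << 9) |  /* years since 1980 */
                                 ((tm->tm_mon + 1) << 5)  |
                                 tm->tm_mday);
    *dos_time = (unsigned short)((tm->tm_hour << 11) |
                                 (tm->tm_min << 5)    |
                                 (tm->tm_sec / 2));           /* 2 second steps */
}

int main(void)
{
    time_t now = time(NULL);
    struct tm tm = *localtime(&now);
    unsigned short d, t;

    to_dos_time(&tm, &d, &t);
    printf("dos date 0x%04x time 0x%04x (seconds rounded to %d)\n",
           d, t, (t & 0x1f) * 2);
    return 0;
}
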
@@ -4735,7 +4839,7 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
int
CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const FILE_BASIC_INFO * data,
+ const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -4760,7 +4864,7 @@ SetTimesRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
@@ -4776,7 +4880,7 @@ SetTimesRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
@@ -4837,11 +4941,11 @@ SetAttrLgcyRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- ConvertToUCS((__le16 *) pSMB->fileName, fileName,
+ ConvertToUCS((__le16 *) pSMB->fileName, fileName,
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->fileName, fileName, name_len);
@@ -4867,8 +4971,8 @@ SetAttrLgcyRetry:
int
CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
- char *fileName, __u64 mode, __u64 uid, __u64 gid,
- dev_t device, const struct nls_table *nls_codepage,
+ char *fileName, __u64 mode, __u64 uid, __u64 gid,
+ dev_t device, const struct nls_table *nls_codepage,
int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -4888,7 +4992,7 @@ setPermsRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -4908,7 +5012,7 @@ setPermsRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
data_offset =
(FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol +
@@ -4931,7 +5035,7 @@ setPermsRetry:
older clients, but we should be precise - we use SetFileSize to
set file size and do not want to truncate file size to zero
accidentally as happened on one Samba server beta by putting
- zero instead of -1 here */
+ zero instead of -1 here */
data_offset->EndOfFile = NO_CHANGE_64;
data_offset->NumOfBytes = NO_CHANGE_64;
data_offset->LastStatusChange = NO_CHANGE_64;
@@ -4943,20 +5047,20 @@ setPermsRetry:
data_offset->DevMajor = cpu_to_le64(MAJOR(device));
data_offset->DevMinor = cpu_to_le64(MINOR(device));
data_offset->Permissions = cpu_to_le64(mode);
-
- if(S_ISREG(mode))
+
+ if (S_ISREG(mode))
data_offset->Type = cpu_to_le32(UNIX_FILE);
- else if(S_ISDIR(mode))
+ else if (S_ISDIR(mode))
data_offset->Type = cpu_to_le32(UNIX_DIR);
- else if(S_ISLNK(mode))
+ else if (S_ISLNK(mode))
data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
- else if(S_ISCHR(mode))
+ else if (S_ISCHR(mode))
data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
- else if(S_ISBLK(mode))
+ else if (S_ISBLK(mode))
data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
- else if(S_ISFIFO(mode))
+ else if (S_ISFIFO(mode))
data_offset->Type = cpu_to_le32(UNIX_FIFO);
- else if(S_ISSOCK(mode))
+ else if (S_ISSOCK(mode))
data_offset->Type = cpu_to_le32(UNIX_SOCKET);
@@ -4974,20 +5078,20 @@ setPermsRetry:
return rc;
}
-int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs, const __u16 netfid,
- __u32 filter, struct file * pfile, int multishot,
+ __u32 filter, struct file *pfile, int multishot,
const struct nls_table *nls_codepage)
{
int rc = 0;
- struct smb_com_transaction_change_notify_req * pSMB = NULL;
- struct smb_com_ntransaction_change_notify_rsp * pSMBr = NULL;
+ struct smb_com_transaction_change_notify_req *pSMB = NULL;
+ struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
struct dir_notify_req *dnotify_req;
int bytes_returned;
- cFYI(1, ("In CIFSSMBNotify for file handle %d",(int)netfid));
+ cFYI(1, ("In CIFSSMBNotify for file handle %d", (int)netfid));
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ (void **) &pSMBr);
if (rc)
return rc;
@@ -5008,7 +5112,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
pSMB->SetupCount = 4; /* single byte does not need le conversion */
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
pSMB->ParameterCount = pSMB->TotalParameterCount;
- if(notify_subdirs)
+ if (notify_subdirs)
pSMB->WatchTree = 1; /* one byte - no le conversion needed */
pSMB->Reserved2 = 0;
pSMB->CompletionFilter = cpu_to_le32(filter);
@@ -5021,11 +5125,11 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
cFYI(1, ("Error in Notify = %d", rc));
} else {
/* Add file to outstanding requests */
- /* BB change to kmem cache alloc */
+ /* BB change to kmem cache alloc */
dnotify_req = kmalloc(
sizeof(struct dir_notify_req),
GFP_KERNEL);
- if(dnotify_req) {
+ if (dnotify_req) {
dnotify_req->Pid = pSMB->hdr.Pid;
dnotify_req->PidHigh = pSMB->hdr.PidHigh;
dnotify_req->Mid = pSMB->hdr.Mid;
@@ -5036,20 +5140,20 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
dnotify_req->filter = filter;
dnotify_req->multishot = multishot;
spin_lock(&GlobalMid_Lock);
- list_add_tail(&dnotify_req->lhead,
+ list_add_tail(&dnotify_req->lhead,
&GlobalDnotifyReqList);
spin_unlock(&GlobalMid_Lock);
- } else
+ } else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
- return rc;
+ return rc;
}
#ifdef CONFIG_CIFS_XATTR
ssize_t
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char * EAData, size_t buf_size,
+ char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
@@ -5058,8 +5162,8 @@ CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
int rc = 0;
int bytes_returned;
int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
+ struct fea *temp_fea;
+ char *temp_ptr;
__u16 params, byte_count;
cFYI(1, ("In Query All EAs path %s", searchName));
@@ -5071,7 +5175,7 @@ QAllEAsRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -5081,7 +5185,7 @@ QAllEAsRetry:
strncpy(pSMB->FileName, searchName, name_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+ params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
@@ -5091,7 +5195,7 @@ QAllEAsRetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -5115,7 +5219,7 @@ QAllEAsRetry:
/* BB also check enough total bytes returned */
/* BB we need to improve the validity checking
of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
+ if (rc || (pSMBr->ByteCount < 4))
rc = -EIO; /* bad smb */
/* else if (pFindData){
memcpy((char *) pFindData,
@@ -5128,39 +5232,40 @@ QAllEAsRetry:
/* check that each element of each entry does not
go beyond end of list */
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist * ea_response_data;
+ struct fealist *ea_response_data;
rc = 0;
/* validate_trans2_offsets() */
- /* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+ /* BB check if start of smb + data_offset > &bcc+ bcc */
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) +
data_offset);
name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1,("ea length %d", name_len));
- if(name_len <= 8) {
+ cFYI(1, ("ea length %d", name_len));
+ if (name_len <= 8) {
/* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
+ cFYI(1, ("empty EA list returned from server"));
} else {
/* account for ea list len */
name_len -= 4;
temp_fea = ea_response_data->list;
temp_ptr = (char *)temp_fea;
- while(name_len > 0) {
+ while (name_len > 0) {
__u16 value_len;
name_len -= 4;
temp_ptr += 4;
rc += temp_fea->name_len;
/* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if(rc<(int)buf_size) {
- memcpy(EAData,"user.",5);
- EAData+=5;
- memcpy(EAData,temp_ptr,temp_fea->name_len);
- EAData+=temp_fea->name_len;
+ rc = rc + 5 + 1;
+ if (rc < (int)buf_size) {
+ memcpy(EAData, "user.", 5);
+ EAData += 5;
+ memcpy(EAData, temp_ptr,
+ temp_fea->name_len);
+ EAData += temp_fea->name_len;
/* null terminate name */
*EAData = 0;
EAData = EAData + 1;
- } else if(buf_size == 0) {
+ } else if (buf_size == 0) {
/* skip copy - calc size only */
} else {
/* stop before overrun buffer */
@@ -5172,11 +5277,15 @@ QAllEAsRetry:
/* account for trailing null */
name_len--;
temp_ptr++;
- value_len = le16_to_cpu(temp_fea->value_len);
+ value_len =
+ le16_to_cpu(temp_fea->value_len);
name_len -= value_len;
temp_ptr += value_len;
- /* BB check that temp_ptr is still within smb BB*/
- /* no trailing null to account for in value len */
+ /* BB check that temp_ptr is still
+ within the SMB BB*/
+
+ /* no trailing null to account for
+ in value len */
/* go on to next EA */
temp_fea = (struct fea *)temp_ptr;
}
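
The loop above walks the trans2 QUERY_ALL_EAS payload. For reference, here is a small user-space sketch of the same OS/2 FEA list layout (a 4-byte list length, then packed entries of flags byte, name length byte, little-endian value length, NUL-terminated name, raw value). Buffer checks are simplified and the helper names are local to the sketch, not taken from the kernel sources.

/* User-space sketch of walking an OS/2 FEA list as returned by the
 * QUERY_ALL_EAS call above.  Also totals the bytes a caller's xattr
 * list buffer would need once each name gets the "user." prefix. */
#include <stdint.h>
#include <stdio.h>

static size_t list_ea_names(const uint8_t *buf, size_t buf_len)
{
    size_t used = 0, pos = 4;              /* skip the 4 byte list length */

    while (pos + 4 <= buf_len) {
        uint8_t name_len = buf[pos + 1];
        uint16_t value_len = (uint16_t)(buf[pos + 2] | (buf[pos + 3] << 8));

        pos += 4;                          /* flags, name_len, value_len */
        if (pos + name_len + 1 + value_len > buf_len)
            break;                         /* malformed list */
        printf("user.%.*s\n", name_len, (const char *)&buf[pos]);
        used += 5 + name_len + 1;          /* "user." + name + NUL */
        pos += name_len + 1 + value_len;   /* name, its NUL, then value */
    }
    return used;
}

int main(void)
{
    /* one entry: name "comment" (7 bytes), value "hi" (2 bytes) */
    uint8_t ea[] = { 18, 0, 0, 0,  0, 7, 2, 0,
                     'c','o','m','m','e','n','t', 0, 'h','i' };
    printf("need %zu bytes\n", list_ea_names(ea, sizeof(ea)));
    return 0;
}
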
@@ -5191,9 +5300,9 @@ QAllEAsRetry:
return (ssize_t)rc;
}
-ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
+ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName, const unsigned char *ea_name,
+ unsigned char *ea_value, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -5201,8 +5310,8 @@ ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
int rc = 0;
int bytes_returned;
int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
+ struct fea *temp_fea;
+ char *temp_ptr;
__u16 params, byte_count;
cFYI(1, ("In Query EA path %s", searchName));
@@ -5214,7 +5323,7 @@ QEARetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
@@ -5224,7 +5333,7 @@ QEARetry:
strncpy(pSMB->FileName, searchName, name_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */ ;
+ params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
@@ -5234,7 +5343,7 @@ QEARetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
@@ -5258,7 +5367,7 @@ QEARetry:
/* BB also check enough total bytes returned */
/* BB we need to improve the validity checking
of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
+ if (rc || (pSMBr->ByteCount < 4))
rc = -EIO; /* bad smb */
/* else if (pFindData){
memcpy((char *) pFindData,
@@ -5271,18 +5380,18 @@ QEARetry:
/* check that each element of each entry does not
go beyond end of list */
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist * ea_response_data;
+ struct fealist *ea_response_data;
rc = -ENODATA;
/* validate_trans2_offsets() */
- /* BB to check if(start of smb + data_offset > &bcc+ bcc)*/
+ /* BB check if start of smb + data_offset > &bcc+ bcc*/
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) +
data_offset);
name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1,("ea length %d", name_len));
- if(name_len <= 8) {
+ cFYI(1, ("ea length %d", name_len));
+ if (name_len <= 8) {
/* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
+ cFYI(1, ("empty EA list returned from server"));
} else {
/* account for ea list len */
name_len -= 4;
@@ -5290,28 +5399,30 @@ QEARetry:
temp_ptr = (char *)temp_fea;
/* loop through checking if we have a matching
name and then return the associated value */
- while(name_len > 0) {
+ while (name_len > 0) {
__u16 value_len;
name_len -= 4;
temp_ptr += 4;
- value_len = le16_to_cpu(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if(memcmp(temp_fea->name,ea_name,
+ value_len =
+ le16_to_cpu(temp_fea->value_len);
+ /* BB validate that value_len falls within SMB,
+ even though maximum for name_len is 255 */
+ if (memcmp(temp_fea->name, ea_name,
temp_fea->name_len) == 0) {
/* found a match */
rc = value_len;
/* account for prefix user. and trailing null */
- if(rc<=(int)buf_size) {
+ if (rc <= (int)buf_size) {
memcpy(ea_value,
temp_fea->name+temp_fea->name_len+1,
rc);
- /* ea values, unlike ea names,
- are not null terminated */
- } else if(buf_size == 0) {
+ /* ea values, unlike ea
+ names, are not null
+ terminated */
+ } else if (buf_size == 0) {
/* skip copy - calc size only */
} else {
- /* stop before overrun buffer */
+ /* stop before overrun buffer */
rc = -ERANGE;
}
break;
@@ -5323,11 +5434,11 @@ QEARetry:
temp_ptr++;
name_len -= value_len;
temp_ptr += value_len;
- /* no trailing null to account for in value len */
- /* go on to next EA */
+ /* No trailing null to account for in
+ value_len. Go on to next EA */
temp_fea = (struct fea *)temp_ptr;
}
- }
+ }
}
}
if (pSMB)
@@ -5340,9 +5451,9 @@ QEARetry:
int
CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const char * ea_name, const void * ea_value,
- const __u16 ea_value_len, const struct nls_table *nls_codepage,
- int remap)
+ const char *ea_name, const void *ea_value,
+ const __u16 ea_value_len, const struct nls_table *nls_codepage,
+ int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
@@ -5361,11 +5472,11 @@ SetEARetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
+ cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
@@ -5376,10 +5487,10 @@ SetEARetry:
/* done calculating parms using name_len of file name,
now use name_len to calculate length of ea name
we are going to create in the inode xattrs */
- if(ea_name == NULL)
+ if (ea_name == NULL)
name_len = 0;
else
- name_len = strnlen(ea_name,255);
+ name_len = strnlen(ea_name, 255);
count = sizeof(*parm_data) + ea_value_len + name_len + 1;
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -5390,7 +5501,7 @@ SetEARetry:
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
+ InformationLevel) - 4;
offset = param_offset + params;
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_EA);
@@ -5410,17 +5521,19 @@ SetEARetry:
/* we checked above that name len is less than 255 */
parm_data->list[0].name_len = (__u8)name_len;
/* EA names are always ASCII */
- if(ea_name)
- strncpy(parm_data->list[0].name,ea_name,name_len);
+ if (ea_name)
+ strncpy(parm_data->list[0].name, ea_name, name_len);
parm_data->list[0].name[name_len] = 0;
parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
/* caller ensures that ea_value_len is less than 64K but
we need to ensure that it fits within the smb */
- /*BB add length check that it would fit in negotiated SMB buffer size BB */
- /* if(ea_value_len > buffer_size - 512 (enough for header)) */
- if(ea_value_len)
- memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
+ /*BB add length check to see if it would fit in
+ negotiated SMB buffer size BB */
+ /* if (ea_value_len > buffer_size - 512 (enough for header)) */
+ if (ea_value_len)
+ memcpy(parm_data->list[0].name+name_len+1,
+ ea_value, ea_value_len);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->ParameterCount = cpu_to_le16(params);
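
Going the other direction, CIFSSMBSetEA above builds a one-entry FEA list in the request; the count it computes covers the list header, the per-entry header, the ASCII name with its trailing NUL, and the raw value. A hedged user-space sketch of that packing follows, with the sizes computed by hand rather than from the kernel's struct fealist/fea definitions.

/* User-space sketch: pack a single extended attribute the way the
 * SETPATHINFO SMB_SET_FILE_EA data area above is laid out. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t pack_one_ea(uint8_t *out, size_t out_len, const char *name,
                          const void *value, uint16_t value_len)
{
    size_t name_len = strlen(name);
    size_t total = 4 + 4 + name_len + 1 + value_len;

    if (name_len > 255 || total > out_len)
        return 0;
    out[0] = (uint8_t)(total & 0xff);          /* list_len, little endian */
    out[1] = (uint8_t)((total >> 8) & 0xff);
    out[2] = (uint8_t)((total >> 16) & 0xff);
    out[3] = (uint8_t)((total >> 24) & 0xff);
    out[4] = 0;                                /* EA flags */
    out[5] = (uint8_t)name_len;
    out[6] = (uint8_t)(value_len & 0xff);      /* value_len, little endian */
    out[7] = (uint8_t)(value_len >> 8);
    memcpy(out + 8, name, name_len + 1);       /* name plus trailing NUL */
    memcpy(out + 8 + name_len + 1, value, value_len);
    return total;
}

int main(void)
{
    uint8_t buf[64];

    printf("packed %zu bytes\n", pack_one_ea(buf, sizeof(buf), "comment", "hi", 2));
    return 0;
}
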
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0a1b8bd..4af3588 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (C) International Business Machines Corp., 2002,2006
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/net.h>
@@ -85,6 +85,7 @@ struct smb_vol {
unsigned direct_io:1;
unsigned remap:1; /* set to remap seven reserved chars in filenames */
unsigned posix_paths:1; /* unset to not ask for posix pathnames. */
+ unsigned no_linux_ext:1;
unsigned sfu_emul:1;
unsigned nullauth:1; /* attempt to authenticate with null user */
unsigned nocase; /* request case insensitive filenames */
@@ -93,20 +94,20 @@ struct smb_vol {
unsigned int wsize;
unsigned int sockopt;
unsigned short int port;
- char * prepath;
+ char *prepath;
};
-static int ipv4_connect(struct sockaddr_in *psin_server,
+static int ipv4_connect(struct sockaddr_in *psin_server,
struct socket **csocket,
- char * netb_name,
- char * server_netb_name);
-static int ipv6_connect(struct sockaddr_in6 *psin_server,
+ char *netb_name,
+ char *server_netb_name);
+static int ipv6_connect(struct sockaddr_in6 *psin_server,
struct socket **csocket);
- /*
+ /*
* cifs tcp session reconnection
- *
+ *
* mark tcp session as reconnecting so temporarily locked
* mark all smb sessions as reconnecting for tcp session
* reconnect tcp session
@@ -120,11 +121,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
struct list_head *tmp;
struct cifsSesInfo *ses;
struct cifsTconInfo *tcon;
- struct mid_q_entry * mid_entry;
-
+ struct mid_q_entry *mid_entry;
+
spin_lock(&GlobalMid_Lock);
- if( kthread_should_stop() ) {
- /* the demux thread will exit normally
+ if ( kthread_should_stop() ) {
+ /* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
return rc;
@@ -150,18 +151,19 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
list_for_each(tmp, &GlobalTreeConnectionList) {
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
- if((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
+ if ((tcon) && (tcon->ses) && (tcon->ses->server == server)) {
tcon->tidStatus = CifsNeedReconnect;
}
}
read_unlock(&GlobalSMBSeslock);
/* do not want to be sending data on a socket we are freeing */
- down(&server->tcpSem);
- if(server->ssocket) {
- cFYI(1,("State: 0x%x Flags: 0x%lx", server->ssocket->state,
+ down(&server->tcpSem);
+ if (server->ssocket) {
+ cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state,
server->ssocket->flags));
- server->ssocket->ops->shutdown(server->ssocket,SEND_SHUTDOWN);
- cFYI(1,("Post shutdown state: 0x%x Flags: 0x%lx", server->ssocket->state,
+ server->ssocket->ops->shutdown(server->ssocket, SEND_SHUTDOWN);
+ cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx",
+ server->ssocket->state,
server->ssocket->flags));
sock_release(server->ssocket);
server->ssocket = NULL;
@@ -172,8 +174,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
mid_entry = list_entry(tmp, struct
mid_q_entry,
qhead);
- if(mid_entry) {
- if(mid_entry->midState == MID_REQUEST_SUBMITTED) {
+ if (mid_entry) {
+ if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
/* Mark other intransit requests as needing
retry so we do not immediately mark the
session bad again (ie after we reconnect
@@ -183,29 +185,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
}
spin_unlock(&GlobalMid_Lock);
- up(&server->tcpSem);
+ up(&server->tcpSem);
- while ( (!kthread_should_stop()) && (server->tcpStatus != CifsGood))
- {
+ while ( (!kthread_should_stop()) && (server->tcpStatus != CifsGood)) {
try_to_freeze();
- if(server->protocolType == IPV6) {
- rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket);
+ if (server->protocolType == IPV6) {
+ rc = ipv6_connect(&server->addr.sockAddr6,
+ &server->ssocket);
} else {
- rc = ipv4_connect(&server->addr.sockAddr,
+ rc = ipv4_connect(&server->addr.sockAddr,
&server->ssocket,
server->workstation_RFC1001_name,
server->server_RFC1001_name);
}
- if(rc) {
- cFYI(1,("reconnect error %d",rc));
+ if (rc) {
+ cFYI(1, ("reconnect error %d", rc));
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
- if( !kthread_should_stop() )
+ if ( !kthread_should_stop() )
server->tcpStatus = CifsGood;
server->sequence_number = 0;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
/* atomic_set(&server->inFlight,0);*/
wake_up(&server->response_q);
}
@@ -213,27 +215,27 @@ cifs_reconnect(struct TCP_Server_Info *server)
return rc;
}
-/*
+/*
return codes:
0 not a transact2, or all data present
>0 transact2 with that much data missing
-EINVAL = invalid transact2
*/
-static int check2ndT2(struct smb_hdr * pSMB, unsigned int maxBufSize)
+static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
{
- struct smb_t2_rsp * pSMBt;
- int total_data_size;
+ struct smb_t2_rsp *pSMBt;
+ int total_data_size;
int data_in_this_rsp;
int remaining;
- if(pSMB->Command != SMB_COM_TRANSACTION2)
+ if (pSMB->Command != SMB_COM_TRANSACTION2)
return 0;
- /* check for plausible wct, bcc and t2 data and parm sizes */
- /* check for parm and data offset going beyond end of smb */
- if(pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cFYI(1,("invalid transact2 word count"));
+ /* check for plausible wct, bcc and t2 data and parm sizes */
+ /* check for parm and data offset going beyond end of smb */
+ if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
+ cFYI(1, ("invalid transact2 word count"));
return -EINVAL;
}
@@ -244,25 +246,25 @@ static int check2ndT2(struct smb_hdr * pSMB, unsigned int maxBufSize)
remaining = total_data_size - data_in_this_rsp;
- if(remaining == 0)
+ if (remaining == 0)
return 0;
- else if(remaining < 0) {
- cFYI(1,("total data %d smaller than data in frame %d",
+ else if (remaining < 0) {
+ cFYI(1, ("total data %d smaller than data in frame %d",
total_data_size, data_in_this_rsp));
return -EINVAL;
} else {
- cFYI(1,("missing %d bytes from transact2, check next response",
+ cFYI(1, ("missing %d bytes from transact2, check next response",
remaining));
- if(total_data_size > maxBufSize) {
- cERROR(1,("TotalDataSize %d is over maximum buffer %d",
- total_data_size,maxBufSize));
- return -EINVAL;
+ if (total_data_size > maxBufSize) {
+ cERROR(1, ("TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize));
+ return -EINVAL;
}
return remaining;
}
}
-static int coalesce_t2(struct smb_hdr * psecond, struct smb_hdr *pTargetSMB)
+static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
{
struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
@@ -270,43 +272,43 @@ static int coalesce_t2(struct smb_hdr * psecond, struct smb_hdr *pTargetSMB)
int total_in_buf;
int remaining;
int total_in_buf2;
- char * data_area_of_target;
- char * data_area_of_buf2;
+ char *data_area_of_target;
+ char *data_area_of_buf2;
__u16 byte_count;
total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
- if(total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
- cFYI(1,("total data sizes of primary and secondary t2 differ"));
+ if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+ cFYI(1, ("total data sizes of primary and secondary t2 differ"));
}
total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
remaining = total_data_size - total_in_buf;
-
- if(remaining < 0)
+
+ if (remaining < 0)
return -EINVAL;
- if(remaining == 0) /* nothing to do, ignore */
+ if (remaining == 0) /* nothing to do, ignore */
return 0;
-
+
total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
- if(remaining < total_in_buf2) {
- cFYI(1,("transact2 2nd response contains too much data"));
+ if (remaining < total_in_buf2) {
+ cFYI(1, ("transact2 2nd response contains too much data"));
}
/* find end of first SMB data area */
- data_area_of_target = (char *)&pSMBt->hdr.Protocol +
+ data_area_of_target = (char *)&pSMBt->hdr.Protocol +
le16_to_cpu(pSMBt->t2_rsp.DataOffset);
/* validate target area */
data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
- le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+ le16_to_cpu(pSMB2->t2_rsp.DataOffset);
data_area_of_target += total_in_buf;
/* copy second buffer into end of first buffer */
- memcpy(data_area_of_target,data_area_of_buf2,total_in_buf2);
+ memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
total_in_buf += total_in_buf2;
pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
@@ -317,11 +319,11 @@ static int coalesce_t2(struct smb_hdr * psecond, struct smb_hdr *pTargetSMB)
byte_count += total_in_buf2;
/* BB also add check that we are not beyond maximum buffer size */
-
+
pTargetSMB->smb_buf_length = byte_count;
- if(remaining == total_in_buf2) {
- cFYI(1,("found the last secondary response"));
+ if (remaining == total_in_buf2) {
+ cFYI(1, ("found the last secondary response"));
return 0; /* we are done */
} else /* more responses to go */
return 1;
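
check2ndT2() and coalesce_t2() above implement the multi-part transact2 handling: every fragment reports the overall TotalDataCount, and secondary fragments are appended to the first response's data area until the total has arrived. Below is a simplified user-space sketch of that bookkeeping, using a stand-in fragment structure rather than the real smb_t2_rsp layout.

/* User-space sketch of coalescing multi-part transact2 responses. */
#include <stdio.h>
#include <string.h>

struct t2_frag {
    unsigned short total_data;   /* TotalDataCount for the whole response */
    unsigned short data_count;   /* bytes carried by this fragment */
    const char *data;
};

/* returns bytes still missing after merging frag into buf, or -1 on error */
static int coalesce(char *buf, size_t buf_size, size_t *received,
                    const struct t2_frag *frag)
{
    if (*received + frag->data_count > frag->total_data ||
        *received + frag->data_count > buf_size)
        return -1;                           /* fragment overflows total */
    memcpy(buf + *received, frag->data, frag->data_count);
    *received += frag->data_count;
    return (int)(frag->total_data - *received);
}

int main(void)
{
    char buf[32];
    size_t got = 0;
    struct t2_frag first  = { 10, 6, "abcdef" };
    struct t2_frag second = { 10, 4, "ghij" };

    printf("missing %d\n", coalesce(buf, sizeof(buf), &got, &first));
    printf("missing %d\n", coalesce(buf, sizeof(buf), &got, &second));
    printf("payload %.10s\n", buf);
    return 0;
}
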
@@ -348,16 +350,15 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
int isMultiRsp;
int reconnect;
- allow_signal(SIGKILL);
current->flags |= PF_MEMALLOC;
server->tsk = current; /* save process info to wake at shutdown */
cFYI(1, ("Demultiplex PID: %d", current->pid));
- write_lock(&GlobalSMBSeslock);
+ write_lock(&GlobalSMBSeslock);
atomic_inc(&tcpSesAllocCount);
length = tcpSesAllocCount.counter;
write_unlock(&GlobalSMBSeslock);
complete(&cifsd_complete);
- if(length > 1) {
+ if (length > 1) {
mempool_resize(cifs_req_poolp,
length + cifs_min_rcv,
GFP_KERNEL);
@@ -426,10 +427,10 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
break;
}
if (!try_to_freeze() && (length == -EINTR)) {
- cFYI(1,("cifsd thread killed"));
+ cFYI(1, ("cifsd thread killed"));
break;
}
- cFYI(1,("Reconnect after unexpected peek error %d",
+ cFYI(1, ("Reconnect after unexpected peek error %d",
length));
cifs_reconnect(server);
csocket = server->ssocket;
@@ -453,26 +454,26 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
with the most common, zero, as regular data */
temp = *((char *) smb_buffer);
- /* Note that FC 1001 length is big endian on the wire,
+ /* Note that FC 1001 length is big endian on the wire,
but we convert it here so it is always manipulated
as host byte order */
pdu_length = ntohl(smb_buffer->smb_buf_length);
smb_buffer->smb_buf_length = pdu_length;
- cFYI(1,("rfc1002 length 0x%x)", pdu_length+4));
+ cFYI(1, ("rfc1002 length 0x%x", pdu_length+4));
if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
- continue;
+ continue;
} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
- cFYI(1,("Good RFC 1002 session rsp"));
+ cFYI(1, ("Good RFC 1002 session rsp"));
continue;
} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
- /* we get this from Windows 98 instead of
+ /* we get this from Windows 98 instead of
an error on SMB negprot response */
- cFYI(1,("Negative RFC1002 Session Response Error 0x%x)",
+ cFYI(1, ("Negative RFC1002 Session Response Error 0x%x)",
pdu_length));
- if(server->tcpStatus == CifsNew) {
- /* if nack on negprot (rather than
+ if (server->tcpStatus == CifsNew) {
+ /* if nack on negprot (rather than
ret of smb negprot error) reconnecting
not going to help, ret error to mount */
break;
@@ -482,10 +483,10 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
msleep(1000);
/* always try 445 first on reconnect
since we get NACK on some if we ever
- connected to port 139 (the NACK is
+ connected to port 139 (the NACK is
since we do not begin with RFC1001
session initialize frame) */
- server->addr.sockAddr.sin_port =
+ server->addr.sockAddr.sin_port =
htons(CIFS_PORT);
cifs_reconnect(server);
csocket = server->ssocket;
@@ -493,7 +494,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
continue;
}
} else if (temp != (char) 0) {
- cERROR(1,("Unknown RFC 1002 frame"));
+ cERROR(1, ("Unknown RFC 1002 frame"));
cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
length);
cifs_reconnect(server);
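
The demultiplex loop above peeks at the 4-byte RFC 1002 session header to decide whether a frame is SMB data, a keep-alive, or a positive/negative session response, then converts the big-endian length to host order. A user-space sketch of that header follows (type values per RFC 1002; the length is treated as 24 bits here, as the code effectively does, although RFC 1002 itself only defines 17 significant bits).

/* User-space sketch of classifying an RFC 1002 session frame header. */
#include <stdint.h>
#include <stdio.h>

#define RFC1002_SESSION_MESSAGE            0x00
#define RFC1002_POSITIVE_SESSION_RESPONSE  0x82
#define RFC1002_NEGATIVE_SESSION_RESPONSE  0x83
#define RFC1002_SESSION_KEEP_ALIVE         0x85

static const char *classify(const uint8_t hdr[4], uint32_t *len)
{
    *len = ((uint32_t)hdr[1] << 16) | ((uint32_t)hdr[2] << 8) | hdr[3];

    switch (hdr[0]) {
    case RFC1002_SESSION_MESSAGE:           return "SMB data";
    case RFC1002_POSITIVE_SESSION_RESPONSE: return "good session rsp";
    case RFC1002_NEGATIVE_SESSION_RESPONSE: return "negative session rsp";
    case RFC1002_SESSION_KEEP_ALIVE:        return "keep alive";
    default:                                return "unknown frame";
    }
}

int main(void)
{
    uint8_t hdr[4] = { 0x00, 0x00, 0x00, 0x48 };   /* a 72 byte SMB follows */
    uint32_t len;

    printf("%s, length %u\n", classify(hdr, &len), len);
    return 0;
}
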
@@ -502,7 +503,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
}
/* else we have an SMB response */
- if((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
+ if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
(pdu_length < sizeof (struct smb_hdr) - 1 - 4)) {
cERROR(1, ("Invalid size SMB length %d pdu_length %d",
length, pdu_length+4));
@@ -510,12 +511,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
csocket = server->ssocket;
wake_up(&server->response_q);
continue;
- }
+ }
/* else length ok */
reconnect = 0;
- if(pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
+ if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
isLargeBuf = TRUE;
memcpy(bigbuf, smallbuf, 4);
smb_buffer = bigbuf;
@@ -523,11 +524,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
length = 0;
iov.iov_base = 4 + (char *)smb_buffer;
iov.iov_len = pdu_length;
- for (total_read = 0; total_read < pdu_length;
+ for (total_read = 0; total_read < pdu_length;
total_read += length) {
length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
pdu_length - total_read, 0);
- if( kthread_should_stop() ||
+ if ( kthread_should_stop() ||
(length == -EINTR)) {
/* then will exit */
reconnect = 2;
@@ -535,19 +536,19 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
} else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
csocket = server->ssocket;
- /* Reconnect wakes up rspns q */
+ /* Reconnect wakes up rspns q */
/* Now we will reread sock */
reconnect = 1;
break;
- } else if ((length == -ERESTARTSYS) ||
+ } else if ((length == -ERESTARTSYS) ||
(length == -EAGAIN)) {
msleep(1); /* minimum sleep to prevent looping,
- allowing socket to clear and app
+ allowing socket to clear and app
threads to set tcpStatus
CifsNeedReconnect if server hung*/
continue;
} else if (length <= 0) {
- cERROR(1,("Received no data, expecting %d",
+ cERROR(1, ("Received no data, expecting %d",
pdu_length - total_read));
cifs_reconnect(server);
csocket = server->ssocket;
@@ -555,13 +556,13 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
break;
}
}
- if(reconnect == 2)
+ if (reconnect == 2)
break;
- else if(reconnect == 1)
+ else if (reconnect == 1)
continue;
length += 4; /* account for rfc1002 hdr */
-
+
dump_smb(smb_buffer, length);
if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
@@ -575,28 +576,28 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- if ((mid_entry->mid == smb_buffer->Mid) &&
+ if ((mid_entry->mid == smb_buffer->Mid) &&
(mid_entry->midState == MID_REQUEST_SUBMITTED) &&
(mid_entry->command == smb_buffer->Command)) {
- if(check2ndT2(smb_buffer,server->maxBuf) > 0) {
+ if (check2ndT2(smb_buffer,server->maxBuf) > 0) {
/* We have a multipart transact2 resp */
isMultiRsp = TRUE;
- if(mid_entry->resp_buf) {
+ if (mid_entry->resp_buf) {
/* merge response - fix up 1st*/
- if(coalesce_t2(smb_buffer,
+ if (coalesce_t2(smb_buffer,
mid_entry->resp_buf)) {
mid_entry->multiRsp = 1;
break;
} else {
/* all parts received */
mid_entry->multiEnd = 1;
- goto multi_t2_fnd;
+ goto multi_t2_fnd;
}
} else {
- if(!isLargeBuf) {
+ if (!isLargeBuf) {
cERROR(1,("1st trans2 resp needs bigbuf"));
/* BB maybe we can fix this up, switch
- to already allocated large buffer? */
+ to already allocated large buffer? */
} else {
/* Have first buffer */
mid_entry->resp_buf =
@@ -606,9 +607,9 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
}
}
break;
- }
+ }
mid_entry->resp_buf = smb_buffer;
- if(isLargeBuf)
+ if (isLargeBuf)
mid_entry->largeBuf = 1;
else
mid_entry->largeBuf = 0;
@@ -628,24 +629,25 @@ multi_t2_fnd:
spin_unlock(&GlobalMid_Lock);
if (task_to_wake) {
/* Was previous buf put in mpx struct for multi-rsp? */
- if(!isMultiRsp) {
+ if (!isMultiRsp) {
/* smb buffer will be freed by user thread */
- if(isLargeBuf) {
+ if (isLargeBuf) {
bigbuf = NULL;
} else
smallbuf = NULL;
}
wake_up_process(task_to_wake);
} else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
- && (isMultiRsp == FALSE)) {
- cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter));
- cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
+ && (isMultiRsp == FALSE)) {
+ cERROR(1, ("No task to wake, unknown frame received! "
+ "NumMids %d", midCount.counter));
+ cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
cifs_dump_detail(smb_buffer);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
-
+
}
} /* end while !EXITING */
@@ -655,12 +657,12 @@ multi_t2_fnd:
/* check if we have blocked requests that need to free */
/* Note that cifs_max_pending is normally 50, but
can be set at module install time to as little as two */
- if(atomic_read(&server->inFlight) >= cifs_max_pending)
+ if (atomic_read(&server->inFlight) >= cifs_max_pending)
atomic_set(&server->inFlight, cifs_max_pending - 1);
/* We do not want to set the max_pending too low or we
could end up with the counter going negative */
spin_unlock(&GlobalMid_Lock);
- /* Although there should not be any requests blocked on
+ /* Although there should not be any requests blocked on
this queue it can not hurt to be paranoid and try to wake up requests
that may have been blocked when more than 50 at a time were on the wire
to the same server - they now will see the session is in exit state
@@ -668,8 +670,8 @@ multi_t2_fnd:
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
-
- if(server->ssocket) {
+
+ if (server->ssocket) {
sock_release(csocket);
server->ssocket = NULL;
}
@@ -709,10 +711,10 @@ multi_t2_fnd:
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- cFYI(1,
- ("Clearing Mid 0x%x - waking up ",mid_entry->mid));
+ cFYI(1, ("Clearing Mid 0x%x - waking up ",
+ mid_entry->mid));
task_to_wake = mid_entry->tsk;
- if(task_to_wake) {
+ if (task_to_wake) {
wake_up_process(task_to_wake);
}
}
@@ -724,7 +726,7 @@ multi_t2_fnd:
}
if (!list_empty(&server->pending_mid_q)) {
- /* mpx threads have not exited yet give them
+ /* mpx threads have not exited yet give them
at least the smb send timeout time for long ops */
/* due to delays on oplock break requests, we need
to wait at least 45 seconds before giving up
@@ -742,7 +744,7 @@ multi_t2_fnd:
/* last chance to mark ses pointers invalid
if there are any pointing to this (e.g
- if a crazy root user tried to kill cifsd
+ if a crazy root user tried to kill cifsd
kernel thread explicitly this might happen) */
list_for_each(tmp, &GlobalSMBSessionList) {
ses = list_entry(tmp, struct cifsSesInfo,
@@ -754,17 +756,18 @@ multi_t2_fnd:
write_unlock(&GlobalSMBSeslock);
kfree(server);
- if(length > 0) {
+ if (length > 0) {
mempool_resize(cifs_req_poolp,
length + cifs_min_rcv,
GFP_KERNEL);
}
-
+
return 0;
}
static int
-cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
+cifs_parse_mount_options(char *options, const char *devname,
+ struct smb_vol *vol)
{
char *value;
char *data;
@@ -772,15 +775,15 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
char separator[2];
separator[0] = ',';
- separator[1] = 0;
+ separator[1] = 0;
if (Local_System_Name[0] != 0)
- memcpy(vol->source_rfc1001_name, Local_System_Name,15);
+ memcpy(vol->source_rfc1001_name, Local_System_Name, 15);
else {
char *nodename = utsname()->nodename;
- int n = strnlen(nodename,15);
- memset(vol->source_rfc1001_name,0x20,15);
- for(i=0 ; i < n ; i++) {
+ int n = strnlen(nodename, 15);
+ memset(vol->source_rfc1001_name, 0x20, 15);
+ for (i = 0; i < n; i++) {
/* does not have to be perfect mapping since field is
informational, only used for servers that do not support
port 445 and it can be overridden at mount time */
@@ -805,31 +808,32 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
if (!options)
return 1;
- if(strncmp(options,"sep=",4) == 0) {
- if(options[4] != 0) {
+ if (strncmp(options, "sep=", 4) == 0) {
+ if (options[4] != 0) {
separator[0] = options[4];
options += 5;
} else {
- cFYI(1,("Null separator not allowed"));
+ cFYI(1, ("Null separator not allowed"));
}
}
-
+
while ((data = strsep(&options, separator)) != NULL) {
if (!*data)
continue;
if ((value = strchr(data, '=')) != NULL)
*value++ = '\0';
- if (strnicmp(data, "user_xattr",10) == 0) {/*parse before user*/
+ /* Have to parse this before we parse for "user" */
+ if (strnicmp(data, "user_xattr", 10) == 0) {
vol->no_xattr = 0;
- } else if (strnicmp(data, "nouser_xattr",12) == 0) {
+ } else if (strnicmp(data, "nouser_xattr", 12) == 0) {
vol->no_xattr = 1;
} else if (strnicmp(data, "user", 4) == 0) {
if (!value) {
printk(KERN_WARNING
"CIFS: invalid or missing username\n");
return 1; /* needs_arg; */
- } else if(!*value) {
+ } else if (!*value) {
/* null user, ie anonymous, authentication */
vol->nullauth = 1;
}
@@ -843,12 +847,12 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
if (!value) {
vol->password = NULL;
continue;
- } else if(value[0] == 0) {
+ } else if (value[0] == 0) {
/* check if string begins with double comma
since that would mean the password really
does start with a comma, and would not
indicate an empty string */
- if(value[1] != separator[0]) {
+ if (value[1] != separator[0]) {
vol->password = NULL;
continue;
}
@@ -857,7 +861,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
/* removed password length check, NTLM passwords
can be arbitrarily long */
- /* if comma in password, the string will be
+ /* if comma in password, the string will be
prematurely null terminated. Commas in password are
specified across the cifs mount interface by a double
comma ie ,, and a comma used as in other cases ie ','
@@ -867,18 +871,18 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
/* NB: password legally can have multiple commas and
the only illegal character in a password is null */
- if ((value[temp_len] == 0) &&
+ if ((value[temp_len] == 0) &&
(value[temp_len+1] == separator[0])) {
/* reinsert comma */
value[temp_len] = separator[0];
- temp_len+=2; /* move after the second comma */
- while(value[temp_len] != 0) {
+ temp_len += 2; /* move after second comma */
+ while (value[temp_len] != 0) {
if (value[temp_len] == separator[0]) {
- if (value[temp_len+1] ==
+ if (value[temp_len+1] ==
separator[0]) {
/* skip second comma */
temp_len++;
- } else {
+ } else {
/* single comma indicating start
of next parm */
break;
@@ -886,24 +890,25 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
}
temp_len++;
}
- if(value[temp_len] == 0) {
+ if (value[temp_len] == 0) {
options = NULL;
} else {
value[temp_len] = 0;
/* point option to start of next parm */
options = value + temp_len + 1;
}
- /* go from value to value + temp_len condensing
+ /* go from value to value + temp_len condensing
double commas to singles. Note that this ends up
allocating a few bytes too many, which is ok */
vol->password = kzalloc(temp_len, GFP_KERNEL);
- if(vol->password == NULL) {
- printk("CIFS: no memory for pass\n");
+ if (vol->password == NULL) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for password\n");
return 1;
}
- for(i=0,j=0;i<temp_len;i++,j++) {
+ for (i = 0, j = 0; i < temp_len; i++, j++) {
vol->password[j] = value[i];
- if(value[i] == separator[0]
+ if (value[i] == separator[0]
&& value[i+1] == separator[0]) {
/* skip second comma */
i++;
@@ -912,8 +917,9 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->password[j] = 0;
} else {
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
- if(vol->password == NULL) {
- printk("CIFS: no memory for pass\n");
+ if (vol->password == NULL) {
+ printk(KERN_WARNING "CIFS: no memory "
+ "for password\n");
return 1;
}
strcpy(vol->password, value);
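
The password handling above deals with the mount option separator: a literal comma inside the password is written as a double comma, so the parser re-joins the prematurely split value and condenses each ",," back to a single ','. A user-space sketch of that unescaping step, operating on an already re-joined string:

/* User-space sketch of the double-comma unescaping used for passwords. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *unescape_password(const char *value, char sep)
{
    char *out = calloc(strlen(value) + 1, 1);
    size_t i, j;

    if (!out)
        return NULL;
    for (i = 0, j = 0; value[i] != '\0'; i++, j++) {
        out[j] = value[i];
        if (value[i] == sep && value[i + 1] == sep)
            i++;                     /* skip the second comma */
    }
    return out;                      /* caller frees */
}

int main(void)
{
    char *pw = unescape_password("pa,,ss", ',');

    printf("%s\n", pw);              /* prints "pa,ss" */
    free(pw);
    return 0;
}
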
@@ -924,20 +930,21 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
} else if (strnlen(value, 35) < 35) {
vol->UNCip = value;
} else {
- printk(KERN_WARNING "CIFS: ip address too long\n");
+ printk(KERN_WARNING "CIFS: ip address "
+ "too long\n");
return 1;
}
- } else if (strnicmp(data, "sec", 3) == 0) {
- if (!value || !*value) {
- cERROR(1,("no security value specified"));
- continue;
- } else if (strnicmp(value, "krb5i", 5) == 0) {
- vol->secFlg |= CIFSSEC_MAY_KRB5 |
+ } else if (strnicmp(data, "sec", 3) == 0) {
+ if (!value || !*value) {
+ cERROR(1, ("no security value specified"));
+ continue;
+ } else if (strnicmp(value, "krb5i", 5) == 0) {
+ vol->secFlg |= CIFSSEC_MAY_KRB5 |
CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "krb5p", 5) == 0) {
- /* vol->secFlg |= CIFSSEC_MUST_SEAL |
- CIFSSEC_MAY_KRB5; */
- cERROR(1,("Krb5 cifs privacy not supported"));
+ /* vol->secFlg |= CIFSSEC_MUST_SEAL |
+ CIFSSEC_MAY_KRB5; */
+ cERROR(1, ("Krb5 cifs privacy not supported"));
return 1;
} else if (strnicmp(value, "krb5", 4) == 0) {
vol->secFlg |= CIFSSEC_MAY_KRB5;
@@ -957,33 +964,34 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->secFlg |= CIFSSEC_MAY_NTLMV2;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
} else if (strnicmp(value, "lanman", 6) == 0) {
- vol->secFlg |= CIFSSEC_MAY_LANMAN;
+ vol->secFlg |= CIFSSEC_MAY_LANMAN;
#endif
} else if (strnicmp(value, "none", 4) == 0) {
vol->nullauth = 1;
- } else {
- cERROR(1,("bad security option: %s", value));
- return 1;
- }
+ } else {
+ cERROR(1, ("bad security option: %s", value));
+ return 1;
+ }
} else if ((strnicmp(data, "unc", 3) == 0)
|| (strnicmp(data, "target", 6) == 0)
|| (strnicmp(data, "path", 4) == 0)) {
if (!value || !*value) {
- printk(KERN_WARNING
- "CIFS: invalid path to network resource\n");
+ printk(KERN_WARNING "CIFS: invalid path to "
+ "network resource\n");
return 1; /* needs_arg; */
}
if ((temp_len = strnlen(value, 300)) < 300) {
- vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+ vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
if (vol->UNC == NULL)
return 1;
- strcpy(vol->UNC,value);
+ strcpy(vol->UNC, value);
if (strncmp(vol->UNC, "//", 2) == 0) {
vol->UNC[0] = '\\';
vol->UNC[1] = '\\';
- } else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
+ } else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
printk(KERN_WARNING
- "CIFS: UNC Path does not begin with // or \\\\ \n");
+ "CIFS: UNC Path does not begin "
+ "with // or \\\\ \n");
return 1;
}
} else {
@@ -1002,43 +1010,47 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->domainname = value;
cFYI(1, ("Domain name set"));
} else {
- printk(KERN_WARNING "CIFS: domain name too long\n");
+ printk(KERN_WARNING "CIFS: domain name too "
+ "long\n");
return 1;
}
- } else if (strnicmp(data, "prefixpath", 10) == 0) {
- if (!value || !*value) {
- printk(KERN_WARNING
- "CIFS: invalid path prefix\n");
- return 1; /* needs_arg; */
- }
- if ((temp_len = strnlen(value, 1024)) < 1024) {
+ } else if (strnicmp(data, "prefixpath", 10) == 0) {
+ if (!value || !*value) {
+ printk(KERN_WARNING
+ "CIFS: invalid path prefix\n");
+ return 1; /* needs_argument */
+ }
+ if ((temp_len = strnlen(value, 1024)) < 1024) {
if (value[0] != '/')
temp_len++; /* missing leading slash */
- vol->prepath = kmalloc(temp_len+1,GFP_KERNEL);
- if (vol->prepath == NULL)
- return 1;
+ vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
+ if (vol->prepath == NULL)
+ return 1;
if (value[0] != '/') {
vol->prepath[0] = '/';
- strcpy(vol->prepath+1,value);
+ strcpy(vol->prepath+1, value);
} else
- strcpy(vol->prepath,value);
- cFYI(1,("prefix path %s",vol->prepath));
- } else {
- printk(KERN_WARNING "CIFS: prefix too long\n");
- return 1;
- }
+ strcpy(vol->prepath, value);
+ cFYI(1, ("prefix path %s", vol->prepath));
+ } else {
+ printk(KERN_WARNING "CIFS: prefix too long\n");
+ return 1;
+ }
} else if (strnicmp(data, "iocharset", 9) == 0) {
if (!value || !*value) {
- printk(KERN_WARNING "CIFS: invalid iocharset specified\n");
+ printk(KERN_WARNING "CIFS: invalid iocharset "
+ "specified\n");
return 1; /* needs_arg; */
}
if (strnlen(value, 65) < 65) {
- if (strnicmp(value,"default",7))
+ if (strnicmp(value, "default", 7))
vol->iocharset = value;
- /* if iocharset not set load_nls_default used by caller */
- cFYI(1, ("iocharset set to %s",value));
+ /* if iocharset not set then load_nls_default
+ is used by caller */
+ cFYI(1, ("iocharset set to %s", value));
} else {
- printk(KERN_WARNING "CIFS: iocharset name too long.\n");
+ printk(KERN_WARNING "CIFS: iocharset name "
+ "too long.\n");
return 1;
}
} else if (strnicmp(data, "uid", 3) == 0) {
@@ -1090,54 +1102,59 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
- cFYI(1,("invalid (empty) netbiosname specified"));
+ cFYI(1, ("invalid (empty) netbiosname"));
} else {
- memset(vol->source_rfc1001_name,0x20,15);
- for(i=0;i<15;i++) {
- /* BB are there cases in which a comma can be
+ memset(vol->source_rfc1001_name, 0x20, 15);
+ for (i = 0; i < 15; i++) {
+ /* BB are there cases in which a comma can be
valid in this workstation netbios name (and need
special handling)? */
/* We do not uppercase netbiosname for user */
- if (value[i]==0)
+ if (value[i] == 0)
break;
- else
- vol->source_rfc1001_name[i] = value[i];
+ else
+ vol->source_rfc1001_name[i] =
+ value[i];
}
/* The string has 16th byte zero still from
set at top of the function */
- if ((i==15) && (value[i] != 0))
- printk(KERN_WARNING "CIFS: netbiosname longer than 15 truncated.\n");
+ if ((i == 15) && (value[i] != 0))
+ printk(KERN_WARNING "CIFS: netbiosname"
+ " longer than 15 truncated.\n");
}
} else if (strnicmp(data, "servern", 7) == 0) {
/* servernetbiosname specified override *SMBSERVER */
if (!value || !*value || (*value == ' ')) {
- cFYI(1,("empty server netbiosname specified"));
+ cFYI(1, ("empty server netbiosname specified"));
} else {
/* last byte, type, is 0x20 for server type */
- memset(vol->target_rfc1001_name,0x20,16);
+ memset(vol->target_rfc1001_name, 0x20, 16);
- for(i=0;i<15;i++) {
+ for (i = 0; i < 15; i++) {
/* BB are there cases in which a comma can be
- valid in this workstation netbios name (and need
- special handling)? */
+ valid in this workstation netbios name
+ (and need special handling)? */
- /* user or mount helper must uppercase netbiosname */
- if (value[i]==0)
+ /* user or mount helper must uppercase
+ the netbiosname */
+ if (value[i] == 0)
break;
else
- vol->target_rfc1001_name[i] = value[i];
+ vol->target_rfc1001_name[i] =
+ value[i];
}
/* The string has 16th byte zero still from
set at top of the function */
- if ((i==15) && (value[i] != 0))
- printk(KERN_WARNING "CIFS: server netbiosname longer than 15 truncated.\n");
+ if ((i == 15) && (value[i] != 0))
+ printk(KERN_WARNING "CIFS: server net"
+ "biosname longer than 15 truncated.\n");
}
} else if (strnicmp(data, "credentials", 4) == 0) {
/* ignore */
} else if (strnicmp(data, "version", 3) == 0) {
/* ignore */
- } else if (strnicmp(data, "guest",5) == 0) {
+ } else if (strnicmp(data, "guest", 5) == 0) {
/* ignore */
} else if (strnicmp(data, "rw", 2) == 0) {
vol->rw = TRUE;
@@ -1149,11 +1166,11 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
(strnicmp(data, "noauto", 6) == 0) ||
(strnicmp(data, "dev", 3) == 0)) {
/* The mount tool or mount.cifs helper (if present)
- uses these opts to set flags, and the flags are read
- by the kernel vfs layer before we get here (ie
- before read super) so there is no point trying to
- parse these options again and set anything and it
- is ok to just ignore them */
+ uses these opts to set flags, and the flags are read
+ by the kernel vfs layer before we get here (ie
+ before read super) so there is no point trying to
+ parse these options again and set anything and it
+ is ok to just ignore them */
continue;
} else if (strnicmp(data, "ro", 2) == 0) {
vol->rw = FALSE;
@@ -1169,26 +1186,31 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->remap = 1;
} else if (strnicmp(data, "nomapchars", 10) == 0) {
vol->remap = 0;
- } else if (strnicmp(data, "sfu", 3) == 0) {
- vol->sfu_emul = 1;
- } else if (strnicmp(data, "nosfu", 5) == 0) {
- vol->sfu_emul = 0;
+ } else if (strnicmp(data, "sfu", 3) == 0) {
+ vol->sfu_emul = 1;
+ } else if (strnicmp(data, "nosfu", 5) == 0) {
+ vol->sfu_emul = 0;
} else if (strnicmp(data, "posixpaths", 10) == 0) {
vol->posix_paths = 1;
} else if (strnicmp(data, "noposixpaths", 12) == 0) {
vol->posix_paths = 0;
- } else if ((strnicmp(data, "nocase", 6) == 0) ||
+ } else if (strnicmp(data, "nounix", 6) == 0) {
+ vol->no_linux_ext = 1;
+ } else if (strnicmp(data, "nolinux", 7) == 0) {
+ vol->no_linux_ext = 1;
+ } else if ((strnicmp(data, "nocase", 6) == 0) ||
(strnicmp(data, "ignorecase", 10) == 0)) {
- vol->nocase = 1;
+ vol->nocase = 1;
} else if (strnicmp(data, "brl", 3) == 0) {
vol->nobrl = 0;
- } else if ((strnicmp(data, "nobrl", 5) == 0) ||
+ } else if ((strnicmp(data, "nobrl", 5) == 0) ||
(strnicmp(data, "nolock", 6) == 0)) {
vol->nobrl = 1;
/* turn off mandatory locking in mode
if remote locking is turned off since the
local vfs will do advisory */
- if(vol->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP)))
+ if (vol->file_mode ==
+ (S_IALLUGO & ~(S_ISUID | S_IXGRP)))
vol->file_mode = S_IALLUGO;
} else if (strnicmp(data, "setuids", 7) == 0) {
vol->setuids = 1;
@@ -1202,55 +1224,61 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->intr = 0;
} else if (strnicmp(data, "intr", 4) == 0) {
vol->intr = 1;
- } else if (strnicmp(data, "serverino",7) == 0) {
+ } else if (strnicmp(data, "serverino", 7) == 0) {
vol->server_ino = 1;
- } else if (strnicmp(data, "noserverino",9) == 0) {
+ } else if (strnicmp(data, "noserverino", 9) == 0) {
vol->server_ino = 0;
- } else if (strnicmp(data, "cifsacl",7) == 0) {
+ } else if (strnicmp(data, "cifsacl", 7) == 0) {
vol->cifs_acl = 1;
} else if (strnicmp(data, "nocifsacl", 9) == 0) {
vol->cifs_acl = 0;
- } else if (strnicmp(data, "acl",3) == 0) {
+ } else if (strnicmp(data, "acl", 3) == 0) {
vol->no_psx_acl = 0;
- } else if (strnicmp(data, "noacl",5) == 0) {
+ } else if (strnicmp(data, "noacl", 5) == 0) {
vol->no_psx_acl = 1;
- } else if (strnicmp(data, "sign",4) == 0) {
+ } else if (strnicmp(data, "sign", 4) == 0) {
vol->secFlg |= CIFSSEC_MUST_SIGN;
/* } else if (strnicmp(data, "seal",4) == 0) {
vol->secFlg |= CIFSSEC_MUST_SEAL; */
- } else if (strnicmp(data, "direct",6) == 0) {
+ } else if (strnicmp(data, "direct", 6) == 0) {
vol->direct_io = 1;
- } else if (strnicmp(data, "forcedirectio",13) == 0) {
+ } else if (strnicmp(data, "forcedirectio", 13) == 0) {
vol->direct_io = 1;
- } else if (strnicmp(data, "in6_addr",8) == 0) {
+ } else if (strnicmp(data, "in6_addr", 8) == 0) {
if (!value || !*value) {
vol->in6_addr = NULL;
} else if (strnlen(value, 49) == 48) {
vol->in6_addr = value;
} else {
- printk(KERN_WARNING "CIFS: ip v6 address not 48 characters long\n");
+ printk(KERN_WARNING "CIFS: ip v6 address not "
+ "48 characters long\n");
return 1;
}
} else if (strnicmp(data, "noac", 4) == 0) {
- printk(KERN_WARNING "CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+ printk(KERN_WARNING "CIFS: Mount option noac not "
+ "supported. Instead set "
+ "/proc/fs/cifs/LookupCacheEnabled to 0\n");
} else
- printk(KERN_WARNING "CIFS: Unknown mount option %s\n",data);
+ printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
+ data);
}
if (vol->UNC == NULL) {
if (devname == NULL) {
- printk(KERN_WARNING "CIFS: Missing UNC name for mount target\n");
+ printk(KERN_WARNING "CIFS: Missing UNC name for mount "
+ "target\n");
return 1;
}
if ((temp_len = strnlen(devname, 300)) < 300) {
- vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
+ vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
if (vol->UNC == NULL)
return 1;
- strcpy(vol->UNC,devname);
+ strcpy(vol->UNC, devname);
if (strncmp(vol->UNC, "//", 2) == 0) {
vol->UNC[0] = '\\';
vol->UNC[1] = '\\';
} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
- printk(KERN_WARNING "CIFS: UNC Path does not begin with // or \\\\ \n");
+ printk(KERN_WARNING "CIFS: UNC Path does not "
+ "begin with // or \\\\ \n");
return 1;
}
} else {
@@ -1258,14 +1286,14 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
return 1;
}
}
- if(vol->UNCip == NULL)
+ if (vol->UNCip == NULL)
vol->UNCip = &vol->UNC[2];
return 0;
}
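
cifs_parse_mount_options() normalizes the UNC name in two places above: either //server/share or \\server\share is accepted, the leading slashes are canonicalized to backslashes, and UNCip later defaults to &vol->UNC[2], i.e. the host portion. A user-space sketch of that normalization:

/* User-space sketch of UNC canonicalization as done in the parser above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *normalize_unc(const char *devname)
{
    char *unc;

    if (strlen(devname) < 3)
        return NULL;
    unc = strdup(devname);
    if (!unc)
        return NULL;
    if (strncmp(unc, "//", 2) == 0) {
        unc[0] = '\\';
        unc[1] = '\\';
    } else if (strncmp(unc, "\\\\", 2) != 0) {
        free(unc);
        return NULL;                 /* must begin with // or \\ */
    }
    return unc;
}

int main(void)
{
    char *unc = normalize_unc("//server/share");

    if (unc) {
        printf("UNC: %s, host starts at: %s\n", unc, unc + 2);
        free(unc);
    }
    return 0;
}
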
static struct cifsSesInfo *
-cifs_find_tcp_session(struct in_addr * target_ip_addr,
+cifs_find_tcp_session(struct in_addr *target_ip_addr,
struct in6_addr *target_ip6_addr,
char *userName, struct TCP_Server_Info **psrvTcp)
{
@@ -1277,19 +1305,25 @@ cifs_find_tcp_session(struct in_addr * target_ip_addr,
list_for_each(tmp, &GlobalSMBSessionList) {
ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
if (ses->server) {
- if((target_ip_addr &&
+ if ((target_ip_addr &&
(ses->server->addr.sockAddr.sin_addr.s_addr
== target_ip_addr->s_addr)) || (target_ip6_addr
&& memcmp(&ses->server->addr.sockAddr6.sin6_addr,
- target_ip6_addr,sizeof(*target_ip6_addr)))){
- /* BB lock server and tcp session and increment use count here?? */
- *psrvTcp = ses->server; /* found a match on the TCP session */
+ target_ip6_addr, sizeof(*target_ip6_addr)))) {
+ /* BB lock server and tcp session and increment
+ use count here?? */
+
+ /* found a match on the TCP session */
+ *psrvTcp = ses->server;
+
/* BB check if reconnection needed */
if (strncmp
(ses->userName, userName,
MAX_USERNAME_SIZE) == 0){
read_unlock(&GlobalSMBSeslock);
- return ses; /* found exact match on both tcp and SMB sessions */
+ /* Found exact match on both TCP and
+ SMB sessions */
+ return ses;
}
}
}
@@ -1320,7 +1354,8 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName)
/* BB lock tcon, server and tcp session and increment use count here? */
/* found a match on the TCP session */
/* BB check if reconnection needed */
- cFYI(1,("IP match, old UNC: %s new: %s",
+ cFYI(1,
+ ("IP match, old UNC: %s new: %s",
tcon->treeName, uncName));
if (strncmp
(tcon->treeName, uncName,
@@ -1355,11 +1390,11 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
unsigned int num_referrals;
int rc = 0;
- rc = get_dfs_path(xid, pSesInfo,old_path, nls_codepage,
+ rc = get_dfs_path(xid, pSesInfo, old_path, nls_codepage,
&num_referrals, &referrals, remap);
/* BB Add in code to: if valid refrl, if not ip address contact
- the helper that resolves tcp names, mount to it, try to
+ the helper that resolves tcp names, mount to it, try to
tcon to it unmount it if fail */
kfree(referrals);
@@ -1368,10 +1403,9 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
}
int
-get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
- const char *old_path, const struct nls_table *nls_codepage,
- unsigned int *pnum_referrals,
- unsigned char ** preferrals, int remap)
+get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+ const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
+ unsigned char **preferrals, int remap)
{
char *temp_unc;
int rc = 0;
@@ -1380,7 +1414,8 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
if (pSesInfo->ipc_tid == 0) {
temp_unc = kmalloc(2 /* for slashes */ +
- strnlen(pSesInfo->serverName,SERVER_NAME_LEN_WITH_NULL * 2)
+ strnlen(pSesInfo->serverName,
+ SERVER_NAME_LEN_WITH_NULL * 2)
+ 1 + 4 /* slash IPC$ */ + 2,
GFP_KERNEL);
if (temp_unc == NULL)
@@ -1391,7 +1426,7 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
cFYI(1,
- ("CIFS Tcon rc = %d ipc_tid = %d", rc,pSesInfo->ipc_tid));
+ ("CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid));
kfree(temp_unc);
}
if (rc == 0)
@@ -1402,62 +1437,63 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
}
/* See RFC1001 section 14 on representation of Netbios names */
-static void rfc1002mangle(char * target,char * source, unsigned int length)
+static void rfc1002mangle(char *target, char *source, unsigned int length)
{
- unsigned int i,j;
+ unsigned int i, j;
- for(i=0,j=0;i<(length);i++) {
+ for (i = 0, j = 0; i < (length); i++) {
/* mask a nibble at a time and encode */
target[j] = 'A' + (0x0F & (source[i] >> 4));
target[j+1] = 'A' + (0x0F & source[i]);
- j+=2;
+ j += 2;
}
}
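Illustrative note, not part of the patch: rfc1002mangle() above performs RFC 1001 "first-level encoding", splitting each byte of the 16-byte NetBIOS name into two nibbles and emitting each nibble as the letter 'A' plus its value, so the 16-byte name becomes the 32-byte called/calling name placed in the session request below (hence called_len and calling_len of 32). A stand-alone user-space sketch of the same transform, using a made-up input name:

#include <stdio.h>
#include <string.h>

static void mangle_demo(char *target, const char *source)
{
        unsigned int i, j;

        /* same nibble-at-a-time encoding as rfc1002mangle(), fixed at 16 bytes */
        for (i = 0, j = 0; i < 16; i++, j += 2) {
                target[j]     = 'A' + (0x0F & (source[i] >> 4));
                target[j + 1] = 'A' + (0x0F & source[i]);
        }
        target[32] = '\0';
}

int main(void)
{
        char name[16], out[33];

        memset(name, ' ', sizeof(name));        /* space-padded, as NetBIOS names are */
        name[0] = 'A';

        mangle_demo(out, name);
        /* 'A' (0x41) encodes to "EB", each ' ' (0x20) to "CA" -> 32 chars total */
        printf("%s\n", out);
        return 0;
}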
static int
-ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
- char * netbios_name, char * target_name)
+ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
+ char *netbios_name, char *target_name)
{
int rc = 0;
int connected = 0;
__be16 orig_port = 0;
- if(*csocket == NULL) {
- rc = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, csocket);
+ if (*csocket == NULL) {
+ rc = sock_create_kern(PF_INET, SOCK_STREAM,
+ IPPROTO_TCP, csocket);
if (rc < 0) {
- cERROR(1, ("Error %d creating socket",rc));
+ cERROR(1, ("Error %d creating socket", rc));
*csocket = NULL;
return rc;
} else {
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1,("Socket created"));
- (*csocket)->sk->sk_allocation = GFP_NOFS;
+ cFYI(1, ("Socket created"));
+ (*csocket)->sk->sk_allocation = GFP_NOFS;
}
}
psin_server->sin_family = AF_INET;
- if(psin_server->sin_port) { /* user overrode default port */
+ if (psin_server->sin_port) { /* user overrode default port */
rc = (*csocket)->ops->connect(*csocket,
(struct sockaddr *) psin_server,
- sizeof (struct sockaddr_in),0);
+ sizeof (struct sockaddr_in), 0);
if (rc >= 0)
connected = 1;
- }
+ }
- if(!connected) {
- /* save original port so we can retry user specified port
+ if (!connected) {
+ /* save original port so we can retry user specified port
later if fall back ports fail this time */
orig_port = psin_server->sin_port;
/* do not retry on the same port we just failed on */
- if(psin_server->sin_port != htons(CIFS_PORT)) {
+ if (psin_server->sin_port != htons(CIFS_PORT)) {
psin_server->sin_port = htons(CIFS_PORT);
rc = (*csocket)->ops->connect(*csocket,
(struct sockaddr *) psin_server,
- sizeof (struct sockaddr_in),0);
+ sizeof (struct sockaddr_in), 0);
if (rc >= 0)
connected = 1;
}
@@ -1465,60 +1501,63 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
if (!connected) {
psin_server->sin_port = htons(RFC1001_PORT);
rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
- psin_server, sizeof (struct sockaddr_in),0);
- if (rc >= 0)
+ psin_server,
+ sizeof (struct sockaddr_in), 0);
+ if (rc >= 0)
connected = 1;
}
/* give up here - unless we want to retry on different
protocol families some day */
if (!connected) {
- if(orig_port)
+ if (orig_port)
psin_server->sin_port = orig_port;
- cFYI(1,("Error %d connecting to server via ipv4",rc));
+ cFYI(1, ("Error %d connecting to server via ipv4", rc));
sock_release(*csocket);
*csocket = NULL;
return rc;
}
- /* Eventually check for other socket options to change from
- the default. sock_setsockopt not used because it expects
+ /* Eventually check for other socket options to change from
+ the default. sock_setsockopt not used because it expects
user space buffer */
- cFYI(1,("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",(*csocket)->sk->sk_sndbuf,
+ cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
+ (*csocket)->sk->sk_sndbuf,
(*csocket)->sk->sk_rcvbuf, (*csocket)->sk->sk_rcvtimeo));
(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
/* make the bufsizes depend on wsize/rsize and max requests */
- if((*csocket)->sk->sk_sndbuf < (200 * 1024))
+ if ((*csocket)->sk->sk_sndbuf < (200 * 1024))
(*csocket)->sk->sk_sndbuf = 200 * 1024;
- if((*csocket)->sk->sk_rcvbuf < (140 * 1024))
+ if ((*csocket)->sk->sk_rcvbuf < (140 * 1024))
(*csocket)->sk->sk_rcvbuf = 140 * 1024;
/* send RFC1001 sessinit */
- if(psin_server->sin_port == htons(RFC1001_PORT)) {
+ if (psin_server->sin_port == htons(RFC1001_PORT)) {
/* some servers require RFC1001 sessinit before sending
- negprot - BB check reconnection in case where second
+ negprot - BB check reconnection in case where second
sessinit is sent but no second negprot */
- struct rfc1002_session_packet * ses_init_buf;
- struct smb_hdr * smb_buf;
- ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL);
- if(ses_init_buf) {
+ struct rfc1002_session_packet *ses_init_buf;
+ struct smb_hdr *smb_buf;
+ ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
+ GFP_KERNEL);
+ if (ses_init_buf) {
ses_init_buf->trailer.session_req.called_len = 32;
- if(target_name && (target_name[0] != 0)) {
+ if (target_name && (target_name[0] != 0)) {
rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
target_name, 16);
} else {
rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
- DEFAULT_CIFS_CALLED_NAME,16);
+ DEFAULT_CIFS_CALLED_NAME, 16);
}
ses_init_buf->trailer.session_req.calling_len = 32;
/* calling name ends in null (byte 16) from old smb
convention. */
- if(netbios_name && (netbios_name[0] !=0)) {
+ if (netbios_name && (netbios_name[0] != 0)) {
rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
- netbios_name,16);
+ netbios_name, 16);
} else {
rfc1002mangle(ses_init_buf->trailer.session_req.calling_name,
- "LINUX_CIFS_CLNT",16);
+ "LINUX_CIFS_CLNT", 16);
}
ses_init_buf->trailer.session_req.scope1 = 0;
ses_init_buf->trailer.session_req.scope2 = 0;
@@ -1528,20 +1567,20 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
rc = smb_send(*csocket, smb_buf, 0x44,
(struct sockaddr *)psin_server);
kfree(ses_init_buf);
- msleep(1); /* RFC1001 layer in at least one server
+ msleep(1); /* RFC1001 layer in at least one server
requires very short break before negprot
presumably because not expecting negprot
to follow so fast. This is a simple
- solution that works without
+ solution that works without
complicating the code and causes no
significant slowing down on mount
for everyone else */
}
- /* else the negprot may still work without this
+ /* else the negprot may still work without this
even though malloc failed */
-
+
}
-
+
return rc;
}
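Illustrative note, not part of the patch: ipv4_connect() above (and ipv6_connect() below) try ports in a fixed order, the user-supplied port if one was given, then CIFS_PORT, then RFC1001_PORT (conventionally 445 and 139), restoring the caller's original port if every attempt fails; only when the connection ends up on the RFC1001 port is the RFC1001 session request sent ahead of negprot. A slightly simplified user-space sketch of that fallback, where try_connect() is a hypothetical stand-in for the kernel's (*csocket)->ops->connect() call:

#include <netinet/in.h>
#include <arpa/inet.h>

#define DEMO_CIFS_PORT          445     /* conventional value of CIFS_PORT */
#define DEMO_RFC1001_PORT       139     /* conventional value of RFC1001_PORT */

int connect_with_fallback(struct sockaddr_in *sin,
                          int (*try_connect)(struct sockaddr_in *))
{
        in_port_t user_port = sin->sin_port;    /* 0 if the user gave no port */
        in_port_t ports[] = { user_port, htons(DEMO_CIFS_PORT),
                              htons(DEMO_RFC1001_PORT) };
        int i, rc = -1;

        for (i = 0; i < 3; i++) {
                if (ports[i] == 0 || (i > 0 && ports[i] == user_port))
                        continue;       /* skip unset or just-tried ports */
                sin->sin_port = ports[i];
                rc = try_connect(sin);
                if (rc >= 0)
                        return rc;      /* connected */
        }
        sin->sin_port = user_port;      /* restore so a later retry can reuse it */
        return rc;
}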
@@ -1552,41 +1591,42 @@ ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
int connected = 0;
__be16 orig_port = 0;
- if(*csocket == NULL) {
- rc = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, csocket);
+ if (*csocket == NULL) {
+ rc = sock_create_kern(PF_INET6, SOCK_STREAM,
+ IPPROTO_TCP, csocket);
if (rc < 0) {
- cERROR(1, ("Error %d creating ipv6 socket",rc));
+ cERROR(1, ("Error %d creating ipv6 socket", rc));
*csocket = NULL;
return rc;
} else {
/* BB other socket options to set KEEPALIVE, NODELAY? */
- cFYI(1,("ipv6 Socket created"));
+ cFYI(1, ("ipv6 Socket created"));
(*csocket)->sk->sk_allocation = GFP_NOFS;
}
}
psin_server->sin6_family = AF_INET6;
- if(psin_server->sin6_port) { /* user overrode default port */
+ if (psin_server->sin6_port) { /* user overrode default port */
rc = (*csocket)->ops->connect(*csocket,
(struct sockaddr *) psin_server,
- sizeof (struct sockaddr_in6),0);
+ sizeof (struct sockaddr_in6), 0);
if (rc >= 0)
connected = 1;
- }
+ }
- if(!connected) {
- /* save original port so we can retry user specified port
+ if (!connected) {
+ /* save original port so we can retry user specified port
later if fall back ports fail this time */
orig_port = psin_server->sin6_port;
/* do not retry on the same port we just failed on */
- if(psin_server->sin6_port != htons(CIFS_PORT)) {
+ if (psin_server->sin6_port != htons(CIFS_PORT)) {
psin_server->sin6_port = htons(CIFS_PORT);
rc = (*csocket)->ops->connect(*csocket,
(struct sockaddr *) psin_server,
- sizeof (struct sockaddr_in6),0);
+ sizeof (struct sockaddr_in6), 0);
if (rc >= 0)
connected = 1;
}
@@ -1594,31 +1634,31 @@ ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
if (!connected) {
psin_server->sin6_port = htons(RFC1001_PORT);
rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *)
- psin_server, sizeof (struct sockaddr_in6),0);
- if (rc >= 0)
+ psin_server, sizeof (struct sockaddr_in6), 0);
+ if (rc >= 0)
connected = 1;
}
/* give up here - unless we want to retry on different
protocol families some day */
if (!connected) {
- if(orig_port)
+ if (orig_port)
psin_server->sin6_port = orig_port;
- cFYI(1,("Error %d connecting to server via ipv6",rc));
+ cFYI(1, ("Error %d connecting to server via ipv6", rc));
sock_release(*csocket);
*csocket = NULL;
return rc;
}
- /* Eventually check for other socket options to change from
- the default. sock_setsockopt not used because it expects
+ /* Eventually check for other socket options to change from
+ the default. sock_setsockopt not used because it expects
user space buffer */
(*csocket)->sk->sk_rcvtimeo = 7 * HZ;
-
+
return rc;
}
-void reset_cifs_unix_caps(int xid, struct cifsTconInfo * tcon,
- struct super_block * sb, struct smb_vol * vol_info)
+void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+ struct super_block *sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
* any requested capabilities changed locally e.g. via
@@ -1630,65 +1670,87 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo * tcon,
* What if we wanted to mount the server share twice once with
* and once without posixacls or posix paths? */
__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-
-
- if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
+
+ if (vol_info && vol_info->no_linux_ext) {
+ tcon->fsUnixInfo.Capability = 0;
+ tcon->unix_ext = 0; /* Unix Extensions disabled */
+ cFYI(1, ("Linux protocol extensions disabled"));
+ return;
+ } else if (vol_info)
+ tcon->unix_ext = 1; /* Unix Extensions supported */
+
+ if (tcon->unix_ext == 0) {
+ cFYI(1, ("Unix extensions disabled so not set on reconnect"));
+ return;
+ }
+
+ if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-
+
/* check for reconnect case in which we do not
want to change the mount behavior if we can avoid it */
- if(vol_info == NULL) {
- /* turn off POSIX ACL and PATHNAMES if not set
+ if (vol_info == NULL) {
+ /* turn off POSIX ACL and PATHNAMES if not set
originally at mount time */
if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
-
-
-
-
}
-
+
cap &= CIFS_UNIX_CAP_MASK;
- if(vol_info && vol_info->no_psx_acl)
+ if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
- else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
- cFYI(1,("negotiated posix acl support"));
- if(sb)
+ else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
+ cFYI(1, ("negotiated posix acl support"));
+ if (sb)
sb->s_flags |= MS_POSIXACL;
}
- if(vol_info && vol_info->posix_paths == 0)
+ if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
- else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
- cFYI(1,("negotiate posix pathnames"));
- if(sb)
- CIFS_SB(sb)->mnt_cifs_flags |=
+ else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
+ cFYI(1, ("negotiate posix pathnames"));
+ if (sb)
+ CIFS_SB(sb)->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
}
-
+
/* We might be setting the path sep back to a different
form if we are reconnecting and the server switched its
- posix path capability for this share */
- if(sb && (CIFS_SB(sb)->prepathlen > 0))
+ posix path capability for this share */
+ if (sb && (CIFS_SB(sb)->prepathlen > 0))
CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb));
-
- cFYI(1,("Negotiate caps 0x%x",(int)cap));
+
+ if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
+ if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
+ CIFS_SB(sb)->rsize = 127 * 1024;
+#ifdef CONFIG_CIFS_DEBUG2
+ cFYI(1, ("larger reads not supported by srv"));
+#endif
+ }
+ }
+
+
+ cFYI(1, ("Negotiate caps 0x%x", (int)cap));
#ifdef CONFIG_CIFS_DEBUG2
- if(cap & CIFS_UNIX_FCNTL_CAP)
- cFYI(1,("FCNTL cap"));
- if(cap & CIFS_UNIX_EXTATTR_CAP)
- cFYI(1,("EXTATTR cap"));
- if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
- cFYI(1,("POSIX path cap"));
- if(cap & CIFS_UNIX_XATTR_CAP)
- cFYI(1,("XATTR cap"));
- if(cap & CIFS_UNIX_POSIX_ACL_CAP)
- cFYI(1,("POSIX ACL cap"));
+ if (cap & CIFS_UNIX_FCNTL_CAP)
+ cFYI(1, ("FCNTL cap"));
+ if (cap & CIFS_UNIX_EXTATTR_CAP)
+ cFYI(1, ("EXTATTR cap"));
+ if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+ cFYI(1, ("POSIX path cap"));
+ if (cap & CIFS_UNIX_XATTR_CAP)
+ cFYI(1, ("XATTR cap"));
+ if (cap & CIFS_UNIX_POSIX_ACL_CAP)
+ cFYI(1, ("POSIX ACL cap"));
+ if (cap & CIFS_UNIX_LARGE_READ_CAP)
+ cFYI(1, ("very large read cap"));
+ if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
+ cFYI(1, ("very large write cap"));
#endif /* CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
- cFYI(1,("setting capabilities failed"));
+ cFYI(1, ("setting capabilities failed"));
}
}
}
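Illustrative note, not part of the patch: reset_cifs_unix_caps() above reads the server's advertised Unix capability bits with CIFSSMBQFSUnixInfo(), masks them down to the client-supported set (CIFS_UNIX_CAP_MASK), clears further bits for mount options such as no_psx_acl or, on reconnect (vol_info == NULL), for anything that was not negotiated originally, and finally reports the result back with CIFSSMBSetFSUnixInfo(). A minimal user-space sketch of the mask-and-trim step; the bit values below are invented for the demo and are not the real CIFS_UNIX_*_CAP constants:

#include <stdio.h>

#define DEMO_ACL_CAP            0x01    /* invented values, illustration only */
#define DEMO_PATHNAMES_CAP      0x02
#define DEMO_SUPPORTED_MASK     (DEMO_ACL_CAP | DEMO_PATHNAMES_CAP)

int main(void)
{
        unsigned long server_caps = 0x07;       /* pretend the server offers extra bits */
        int no_psx_acl = 1;                     /* the mount asked for POSIX ACLs off */
        unsigned long cap = server_caps & DEMO_SUPPORTED_MASK;

        if (no_psx_acl)
                cap &= ~DEMO_ACL_CAP;

        printf("negotiated caps 0x%lx\n", cap); /* prints "negotiated caps 0x2" */
        return 0;
}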
@@ -1712,8 +1774,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
xid = GetXid();
/* cFYI(1, ("Entering cifs_mount. Xid: %d with: %s", xid, mount_data)); */
-
- memset(&volume_info,0,sizeof(struct smb_vol));
+
+ memset(&volume_info, 0, sizeof(struct smb_vol));
if (cifs_parse_mount_options(mount_data, devname, &volume_info)) {
kfree(volume_info.UNC);
kfree(volume_info.password);
@@ -1723,15 +1785,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
}
if (volume_info.nullauth) {
- cFYI(1,("null user"));
+ cFYI(1, ("null user"));
volume_info.username = NULL;
} else if (volume_info.username) {
/* BB fixme parse for domain name here */
- cFYI(1, ("Username: %s ", volume_info.username));
+ cFYI(1, ("Username: %s", volume_info.username));
} else {
cifserror("No username specified");
- /* In userspace mount helper we can get user name from alternate
- locations such as env variables and files on disk */
+ /* In userspace mount helper we can get user name from alternate
+ locations such as env variables and files on disk */
kfree(volume_info.UNC);
kfree(volume_info.password);
kfree(volume_info.prepath);
@@ -1740,18 +1802,20 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
}
if (volume_info.UNCip && volume_info.UNC) {
- rc = cifs_inet_pton(AF_INET, volume_info.UNCip,&sin_server.sin_addr.s_addr);
+ rc = cifs_inet_pton(AF_INET, volume_info.UNCip,
+ &sin_server.sin_addr.s_addr);
- if(rc <= 0) {
+ if (rc <= 0) {
/* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6,volume_info.UNCip,&sin_server6.sin6_addr.in6_u);
- if(rc > 0)
+ rc = cifs_inet_pton(AF_INET6, volume_info.UNCip,
+ &sin_server6.sin6_addr.in6_u);
+ if (rc > 0)
address_type = AF_INET6;
} else {
address_type = AF_INET;
}
-
- if(rc <= 0) {
+
+ if (rc <= 0) {
/* we failed translating address */
kfree(volume_info.UNC);
kfree(volume_info.password);
@@ -1763,9 +1827,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cFYI(1, ("UNC: %s ip: %s", volume_info.UNC, volume_info.UNCip));
/* success */
rc = 0;
- } else if (volume_info.UNCip){
- /* BB using ip addr as server name connect to the DFS root below */
- cERROR(1,("Connecting to DFS root not implemented yet"));
+ } else if (volume_info.UNCip) {
+ /* BB using ip addr as server name to connect to the
+ DFS root below */
+ cERROR(1, ("Connecting to DFS root not implemented yet"));
kfree(volume_info.UNC);
kfree(volume_info.password);
kfree(volume_info.prepath);
@@ -1773,7 +1838,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
return -EINVAL;
} else /* which servers DFS root would we conect to */ {
cERROR(1,
- ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified"));
+ ("CIFS mount error: No UNC path (e.g. -o "
+ "unc=//192.168.1.100/public) specified"));
kfree(volume_info.UNC);
kfree(volume_info.password);
kfree(volume_info.prepath);
@@ -1782,13 +1848,14 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
}
/* this is needed for ASCII cp to Unicode converts */
- if(volume_info.iocharset == NULL) {
+ if (volume_info.iocharset == NULL) {
cifs_sb->local_nls = load_nls_default();
/* load_nls_default can not return null */
} else {
cifs_sb->local_nls = load_nls(volume_info.iocharset);
- if(cifs_sb->local_nls == NULL) {
- cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset));
+ if (cifs_sb->local_nls == NULL) {
+ cERROR(1, ("CIFS mount error: iocharset %s not found",
+ volume_info.iocharset));
kfree(volume_info.UNC);
kfree(volume_info.password);
kfree(volume_info.prepath);
@@ -1797,12 +1864,12 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
}
}
- if(address_type == AF_INET)
+ if (address_type == AF_INET)
existingCifsSes = cifs_find_tcp_session(&sin_server.sin_addr,
NULL /* no ipv6 addr */,
volume_info.username, &srvTcp);
- else if(address_type == AF_INET6) {
- cFYI(1,("looking for ipv6 address"));
+ else if (address_type == AF_INET6) {
+ cFYI(1, ("looking for ipv6 address"));
existingCifsSes = cifs_find_tcp_session(NULL /* no ipv4 addr */,
&sin_server6.sin6_addr,
volume_info.username, &srvTcp);
@@ -1814,26 +1881,25 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
return -EINVAL;
}
-
if (srvTcp) {
- cFYI(1, ("Existing tcp session with server found"));
+ cFYI(1, ("Existing tcp session with server found"));
} else { /* create socket */
if (volume_info.port)
sin_server.sin_port = htons(volume_info.port);
else
sin_server.sin_port = 0;
if (address_type == AF_INET6) {
- cFYI(1,("attempting ipv6 connect"));
+ cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
- rc = ipv6_connect(&sin_server6,&csocket);
- } else
- rc = ipv4_connect(&sin_server,&csocket,
+ rc = ipv6_connect(&sin_server6, &csocket);
+ } else
+ rc = ipv4_connect(&sin_server, &csocket,
volume_info.source_rfc1001_name,
volume_info.target_rfc1001_name);
if (rc < 0) {
- cERROR(1,
- ("Error connecting to IPv4 socket. Aborting operation"));
+ cERROR(1, ("Error connecting to IPv4 socket. "
+ "Aborting operation"));
if (csocket != NULL)
sock_release(csocket);
kfree(volume_info.UNC);
@@ -1854,8 +1920,9 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
return rc;
} else {
memset(srvTcp, 0, sizeof (struct TCP_Server_Info));
- memcpy(&srvTcp->addr.sockAddr, &sin_server, sizeof (struct sockaddr_in));
- atomic_set(&srvTcp->inFlight,0);
+ memcpy(&srvTcp->addr.sockAddr, &sin_server,
+ sizeof (struct sockaddr_in));
+ atomic_set(&srvTcp->inFlight, 0);
/* BB Add code for ipv6 case too */
srvTcp->ssocket = csocket;
srvTcp->protocolType = IPV4;
@@ -1870,7 +1937,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
srvTcp->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, srvTcp, "cifsd");
if ( IS_ERR(srvTcp->tsk) ) {
rc = PTR_ERR(srvTcp->tsk);
- cERROR(1,("error %d create cifsd thread", rc));
+ cERROR(1, ("error %d create cifsd thread", rc));
srvTcp->tsk = NULL;
sock_release(csocket);
kfree(volume_info.UNC);
@@ -1881,8 +1948,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
}
wait_for_completion(&cifsd_complete);
rc = 0;
- memcpy(srvTcp->workstation_RFC1001_name, volume_info.source_rfc1001_name,16);
- memcpy(srvTcp->server_RFC1001_name, volume_info.target_rfc1001_name,16);
+ memcpy(srvTcp->workstation_RFC1001_name,
+ volume_info.source_rfc1001_name, 16);
+ memcpy(srvTcp->server_RFC1001_name,
+ volume_info.target_rfc1001_name, 16);
srvTcp->sequence_number = 0;
}
}
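Illustrative note, not part of the patch: the block above starts the cifsd demultiplex thread with kthread_run(), treats an IS_ERR() result as failure via PTR_ERR(), and then blocks on wait_for_completion(&cifsd_complete) until the new thread is up. A minimal sketch of that generic pattern; every name here is hypothetical:

#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static DECLARE_COMPLETION(demo_ready);

static int demo_thread_fn(void *data)
{
        complete(&demo_ready);                  /* tell the starter we are running */
        while (!kthread_should_stop())
                msleep(100);                    /* stand-in for real work */
        return 0;
}

static int demo_start(void *data, struct task_struct **tsk)
{
        *tsk = kthread_run(demo_thread_fn, data, "demo_worker");
        if (IS_ERR(*tsk)) {
                int rc = PTR_ERR(*tsk);

                *tsk = NULL;                    /* do not leave a stale pointer behind */
                return rc;
        }
        wait_for_completion(&demo_ready);       /* wait until the thread announced itself */
        return 0;
}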
@@ -1903,16 +1972,17 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
NIPQUAD(sin_server.sin_addr.s_addr));
}
- if (!rc){
- /* volume_info.password freed at unmount */
+ if (!rc) {
+ /* volume_info.password freed at unmount */
if (volume_info.password)
pSesInfo->password = volume_info.password;
if (volume_info.username)
strncpy(pSesInfo->userName,
- volume_info.username,MAX_USERNAME_SIZE);
+ volume_info.username,
+ MAX_USERNAME_SIZE);
if (volume_info.domainname) {
int len = strlen(volume_info.domainname);
- pSesInfo->domainName =
+ pSesInfo->domainName =
kmalloc(len + 1, GFP_KERNEL);
if (pSesInfo->domainName)
strcpy(pSesInfo->domainName,
@@ -1922,46 +1992,48 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
pSesInfo->overrideSecFlg = volume_info.secFlg;
down(&pSesInfo->sesSem);
/* BB FIXME need to pass vol->secFlgs BB */
- rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
+ rc = cifs_setup_session(xid, pSesInfo,
+ cifs_sb->local_nls);
up(&pSesInfo->sesSem);
if (!rc)
atomic_inc(&srvTcp->socketUseCount);
} else
kfree(volume_info.password);
}
-
+
/* search for existing tcon to this server share */
if (!rc) {
if (volume_info.rsize > CIFSMaxBufSize) {
- cERROR(1,("rsize %d too large, using MaxBufSize",
+ cERROR(1, ("rsize %d too large, using MaxBufSize",
volume_info.rsize));
cifs_sb->rsize = CIFSMaxBufSize;
- } else if((volume_info.rsize) && (volume_info.rsize <= CIFSMaxBufSize))
+ } else if ((volume_info.rsize) &&
+ (volume_info.rsize <= CIFSMaxBufSize))
cifs_sb->rsize = volume_info.rsize;
else /* default */
cifs_sb->rsize = CIFSMaxBufSize;
if (volume_info.wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
- cERROR(1,("wsize %d too large using 4096 instead",
+ cERROR(1, ("wsize %d too large, using 4096 instead",
volume_info.wsize));
cifs_sb->wsize = 4096;
} else if (volume_info.wsize)
cifs_sb->wsize = volume_info.wsize;
else
- cifs_sb->wsize =
+ cifs_sb->wsize =
min_t(const int, PAGEVEC_SIZE * PAGE_CACHE_SIZE,
127*1024);
/* old default of CIFSMaxBufSize was too small now
- that SMB Write2 can send multiple pages in kvec.
+ that SMB Write2 can send multiple pages in kvec.
RFC1001 does not describe what happens when frame
bigger than 128K is sent so use that as max in
conjunction with 52K kvec constraint on arch with 4K
page size */
if (cifs_sb->rsize < 2048) {
- cifs_sb->rsize = 2048;
+ cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
- cFYI(1,("readsize set to minimum 2048"));
+ cFYI(1, ("readsize set to minimum: 2048"));
}
/* calculate prepath */
cifs_sb->prepath = volume_info.prepath;
@@ -1969,14 +2041,14 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->prepathlen = strlen(cifs_sb->prepath);
cifs_sb->prepath[0] = CIFS_DIR_SEP(cifs_sb);
volume_info.prepath = NULL;
- } else
+ } else
cifs_sb->prepathlen = 0;
cifs_sb->mnt_uid = volume_info.linux_uid;
cifs_sb->mnt_gid = volume_info.linux_gid;
cifs_sb->mnt_file_mode = volume_info.file_mode;
cifs_sb->mnt_dir_mode = volume_info.dir_mode;
- cFYI(1,("file mode: 0x%x dir mode: 0x%x",
- cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
+ cFYI(1, ("file mode: 0x%x dir mode: 0x%x",
+ cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode));
if (volume_info.noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -1999,7 +2071,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (volume_info.override_gid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
if (volume_info.direct_io) {
- cFYI(1,("mounting share using direct i/o"));
+ cFYI(1, ("mounting share using direct i/o"));
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
@@ -2010,7 +2082,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cFYI(1, ("Found match on UNC path"));
/* we can have only one retry value for a connection
to a share so for resources mounted more than once
- to the same server share the last value passed in
+ to the same server share the last value passed in
for the retry flag is used */
tcon->retry = volume_info.retry;
tcon->nocase = volume_info.nocase;
@@ -2019,17 +2091,17 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (tcon == NULL)
rc = -ENOMEM;
else {
- /* check for null share name ie connecting to
+ /* check for null share name ie connecting to
* dfs root */
- /* BB check if this works for exactly length
+ /* BB check if this works for exactly length
* three strings */
if ((strchr(volume_info.UNC + 3, '\\') == NULL)
&& (strchr(volume_info.UNC + 3, '/') ==
NULL)) {
rc = connect_to_dfs_path(xid, pSesInfo,
"", cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(volume_info.UNC);
FreeXid(xid);
@@ -2038,7 +2110,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
/* BB Do we need to wrap sesSem around
* this TCon call and Unix SetFS as
* we do on SessSetup and reconnect? */
- rc = CIFSTCon(xid, pSesInfo,
+ rc = CIFSTCon(xid, pSesInfo,
volume_info.UNC,
tcon, cifs_sb->local_nls);
cFYI(1, ("CIFS Tcon rc = %d", rc));
@@ -2075,9 +2147,9 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
always wake up processes blocked in
tcp in recv_mesg then we could remove the
send_sig call */
- send_sig(SIGKILL,srvTcp->tsk,1);
+ force_sig(SIGKILL, srvTcp->tsk);
tsk = srvTcp->tsk;
- if(tsk)
+ if (tsk)
kthread_stop(tsk);
}
}
@@ -2086,15 +2158,17 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
tconInfoFree(tcon);
if (existingCifsSes == NULL) {
if (pSesInfo) {
- if ((pSesInfo->server) &&
+ if ((pSesInfo->server) &&
(pSesInfo->status == CifsGood)) {
int temp_rc;
temp_rc = CIFSSMBLogoff(xid, pSesInfo);
/* if the socketUseCount is now zero */
if ((temp_rc == -ESHUTDOWN) &&
- (pSesInfo->server) && (pSesInfo->server->tsk)) {
+ (pSesInfo->server) &&
+ (pSesInfo->server->tsk)) {
struct task_struct *tsk;
- send_sig(SIGKILL,pSesInfo->server->tsk,1);
+ force_sig(SIGKILL,
+ pSesInfo->server->tsk);
tsk = pSesInfo->server->tsk;
if (tsk)
kthread_stop(tsk);
@@ -2113,19 +2187,29 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
/* do not care if following two calls succeed - informational */
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
-
+
/* tell server which Unix caps we support */
if (tcon->ses->capabilities & CAP_UNIX)
+ /* the caps reset checks the mount options to see if unix
+ extensions were disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, sb, &volume_info);
-
+ else
+ tcon->unix_ext = 0; /* server does not support them */
+
+ if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
+ cifs_sb->rsize = 1024 * 127;
+#ifdef CONFIG_CIFS_DEBUG2
+ cFYI(1, ("no very large read support, rsize now 127K"));
+#endif
+ }
if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
cifs_sb->wsize = min(cifs_sb->wsize,
(tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE));
if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
- cifs_sb->rsize = min(cifs_sb->rsize,
- (tcon->ses->server->maxBuf -
- MAX_CIFS_HDR_SIZE));
+ cifs_sb->rsize = min(cifs_sb->rsize,
+ (tcon->ses->server->maxBuf -
+ MAX_CIFS_HDR_SIZE));
}
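A note for illustration, using made-up numbers rather than the real constants: if the server's maxBuf were 16644 and MAX_CIFS_HDR_SIZE were 78, then on a server lacking CAP_LARGE_READ_X a negotiated rsize of 130048 bytes (127K) would be clamped by the min() above to 16644 - 78 = 16566 bytes, and wsize is clamped the same way when CAP_LARGE_WRITE_X is missing. The separate unix_ext check just before it caps rsize at 127K whenever the Unix extensions are unavailable, since in that case the CIFS_UNIX_LARGE_READ_CAP check in reset_cifs_unix_caps() never runs.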
/* volume_info.password is freed above when existing session found
@@ -2178,7 +2262,8 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
pSMB->req_no_secext.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
pSMB->req_no_secext.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (ses->server->secMode &
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2197,7 +2282,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
}
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
- pSMB->req_no_secext.CaseInsensitivePasswordLength =
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
cpu_to_le16(CIFS_SESS_KEY_SIZE);
pSMB->req_no_secext.CaseSensitivePasswordLength =
@@ -2215,9 +2300,9 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
}
if (user == NULL)
bytes_returned = 0; /* skip null user */
- else
+ else
bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
+ cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
nls_codepage);
/* convert number of 16 bit words to bytes */
bcc_ptr += 2 * bytes_returned;
@@ -2247,7 +2332,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr += 2 * bytes_returned;
bcc_ptr += 2;
} else {
- if (user != NULL) {
+ if (user != NULL) {
strncpy(bcc_ptr, user, 200);
bcc_ptr += strnlen(user, 200);
}
@@ -2282,11 +2367,12 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
__u16 action = le16_to_cpu(pSMBr->resp.Action);
__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
if (action & GUEST_LOGIN)
- cFYI(1, (" Guest login")); /* do we want to mark SesInfo struct ? */
- ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (le) */
+ cFYI(1, (" Guest login")); /* BB mark SesInfo struct? */
+ ses->Suid = smb_buffer_response->Uid; /* UID left in wire format
+ (little endian) */
cFYI(1, ("UID = %d ", ses->Suid));
- /* response can have either 3 or 4 word count - Samba sends 3 */
- bcc_ptr = pByteArea(smb_buffer_response);
+ /* response can have either 3 or 4 word count - Samba sends 3 */
+ bcc_ptr = pByteArea(smb_buffer_response);
if ((pSMBr->resp.hdr.WordCount == 3)
|| ((pSMBr->resp.hdr.WordCount == 4)
&& (blob_len < pSMBr->resp.ByteCount))) {
@@ -2296,8 +2382,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
if ((long) (bcc_ptr) % 2) {
remaining_words =
- (BCC(smb_buffer_response) - 1) /2;
- bcc_ptr++; /* Unicode strings must be word aligned */
+ (BCC(smb_buffer_response) - 1) / 2;
+ /* Unicode strings must be word
+ aligned */
+ bcc_ptr++;
} else {
remaining_words =
BCC(smb_buffer_response) / 2;
@@ -2308,13 +2396,15 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
/* We look for obvious messed up bcc or strings in response so we do not go off
the end since (at least) WIN2K and Windows XP have a major bug in not null
terminating last Unicode string in response */
- if(ses->serverOS)
+ if (ses->serverOS)
kfree(ses->serverOS);
- ses->serverOS = kzalloc(2 * (len + 1), GFP_KERNEL);
- if(ses->serverOS == NULL)
+ ses->serverOS = kzalloc(2 * (len + 1),
+ GFP_KERNEL);
+ if (ses->serverOS == NULL)
goto sesssetup_nomem;
cifs_strfromUCS_le(ses->serverOS,
- (__le16 *)bcc_ptr, len,nls_codepage);
+ (__le16 *)bcc_ptr,
+ len, nls_codepage);
bcc_ptr += 2 * (len + 1);
remaining_words -= len + 1;
ses->serverOS[2 * len] = 0;
@@ -2323,42 +2413,49 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
len = UniStrnlen((wchar_t *)bcc_ptr,
remaining_words-1);
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
- if(ses->serverNOS == NULL)
+ ses->serverNOS = kzalloc(2 * (len + 1),
+ GFP_KERNEL);
+ if (ses->serverNOS == NULL)
goto sesssetup_nomem;
cifs_strfromUCS_le(ses->serverNOS,
- (__le16 *)bcc_ptr,len,nls_codepage);
+ (__le16 *)bcc_ptr,
+ len, nls_codepage);
bcc_ptr += 2 * (len + 1);
ses->serverNOS[2 * len] = 0;
ses->serverNOS[1 + (2 * len)] = 0;
- if(strncmp(ses->serverNOS,
- "NT LAN Manager 4",16) == 0) {
- cFYI(1,("NT4 server"));
+ if (strncmp(ses->serverNOS,
+ "NT LAN Manager 4", 16) == 0) {
+ cFYI(1, ("NT4 server"));
ses->flags |= CIFS_SES_NT4;
}
remaining_words -= len + 1;
if (remaining_words > 0) {
len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
- /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
- if(ses->serverDomain)
+ /* last string is not always null terminated
+ (e.g. for Windows XP & 2000) */
+ if (ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain =
- kzalloc(2*(len+1),GFP_KERNEL);
- if(ses->serverDomain == NULL)
+ kzalloc(2*(len+1),
+ GFP_KERNEL);
+ if (ses->serverDomain == NULL)
goto sesssetup_nomem;
cifs_strfromUCS_le(ses->serverDomain,
- (__le16 *)bcc_ptr,len,nls_codepage);
+ (__le16 *)bcc_ptr,
+ len, nls_codepage);
bcc_ptr += 2 * (len + 1);
ses->serverDomain[2*len] = 0;
ses->serverDomain[1+(2*len)] = 0;
- } /* else no more room so create dummy domain string */
- else {
- if(ses->serverDomain)
+ } else { /* no more room so create
+ dummy domain string */
+ if (ses->serverDomain)
kfree(ses->serverDomain);
- ses->serverDomain =
+ ses->serverDomain =
kzalloc(2, GFP_KERNEL);
}
- } else { /* no room so create dummy domain and NOS string */
+ } else { /* no room so create dummy domain
+ and NOS string */
+
/* if these kcallocs fail not much we
can do, but better to not fail the
sesssetup itself */
@@ -2375,19 +2472,22 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
pByteArea(smb_buffer_response)
<= BCC(smb_buffer_response)) {
kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
- if(ses->serverOS == NULL)
+ ses->serverOS = kzalloc(len + 1,
+ GFP_KERNEL);
+ if (ses->serverOS == NULL)
goto sesssetup_nomem;
- strncpy(ses->serverOS,bcc_ptr, len);
+ strncpy(ses->serverOS, bcc_ptr, len);
bcc_ptr += len;
- bcc_ptr[0] = 0; /* null terminate the string */
+ /* null terminate the string */
+ bcc_ptr[0] = 0;
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
- if(ses->serverNOS == NULL)
+ ses->serverNOS = kzalloc(len + 1,
+ GFP_KERNEL);
+ if (ses->serverNOS == NULL)
goto sesssetup_nomem;
strncpy(ses->serverNOS, bcc_ptr, len);
bcc_ptr += len;
@@ -2395,23 +2495,27 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
+ if (ses->serverDomain)
kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(len + 1,GFP_KERNEL);
- if(ses->serverDomain == NULL)
+ ses->serverDomain = kzalloc(len + 1,
+ GFP_KERNEL);
+ if (ses->serverDomain == NULL)
goto sesssetup_nomem;
- strncpy(ses->serverDomain, bcc_ptr, len);
+ strncpy(ses->serverDomain, bcc_ptr,
+ len);
bcc_ptr += len;
bcc_ptr[0] = 0;
bcc_ptr++;
} else
cFYI(1,
- ("Variable field of length %d extends beyond end of smb ",
+ ("Variable field of length %d "
+ "extends beyond end of smb ",
len));
}
} else {
cERROR(1,
- (" Security Blob Length extends beyond end of SMB"));
+ (" Security Blob Length extends beyond "
+ "end of SMB"));
}
} else {
cERROR(1,
@@ -2430,7 +2534,7 @@ sesssetup_nomem: /* do not return an error on nomem for the info strings,
static int
CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
- struct cifsSesInfo *ses, int * pNTLMv2_flag,
+ struct cifsSesInfo *ses, int *pNTLMv2_flag,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
@@ -2450,7 +2554,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
__u16 count;
cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
- if(ses == NULL)
+ if (ses == NULL)
return -EINVAL;
domain = ses->domainName;
*pNTLMv2_flag = FALSE;
@@ -2474,7 +2578,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2502,9 +2606,9 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
NTLMSSP_NEGOTIATE_56 |
/* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
- if(sign_CIFS_PDUs)
+ if (sign_CIFS_PDUs)
negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
-/* if(ntlmv2_support)
+/* if (ntlmv2_support)
negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/
/* setup pointers to domain name and workstation name */
bcc_ptr += SecurityBlobLength;
@@ -2574,11 +2678,11 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
__u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength);
if (action & GUEST_LOGIN)
- cFYI(1, (" Guest login"));
- /* Do we want to set anything in SesInfo struct when guest login? */
+ cFYI(1, (" Guest login"));
+ /* Do we want to set anything in SesInfo struct when guest login? */
- bcc_ptr = pByteArea(smb_buffer_response);
- /* response can have either 3 or 4 word count - Samba sends 3 */
+ bcc_ptr = pByteArea(smb_buffer_response);
+ /* response can have either 3 or 4 word count - Samba sends 3 */
SecurityBlob2 = (PCHALLENGE_MESSAGE) bcc_ptr;
if (SecurityBlob2->MessageType != NtLmChallenge) {
@@ -2586,7 +2690,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
("Unexpected NTLMSSP message type received %d",
SecurityBlob2->MessageType));
} else if (ses) {
- ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
+ ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
cFYI(1, ("UID = %d", ses->Suid));
if ((pSMBr->resp.hdr.WordCount == 3)
|| ((pSMBr->resp.hdr.WordCount == 4)
@@ -2604,18 +2708,18 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
memcpy(ses->server->cryptKey,
SecurityBlob2->Challenge,
CIFS_CRYPTO_KEY_SIZE);
- if(SecurityBlob2->NegotiateFlags &
+ if (SecurityBlob2->NegotiateFlags &
cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
*pNTLMv2_flag = TRUE;
- if((SecurityBlob2->NegotiateFlags &
- cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN))
+ if ((SecurityBlob2->NegotiateFlags &
+ cpu_to_le32(NTLMSSP_NEGOTIATE_ALWAYS_SIGN))
|| (sign_CIFS_PDUs > 1))
- ses->server->secMode |=
- SECMODE_SIGN_REQUIRED;
- if ((SecurityBlob2->NegotiateFlags &
+ ses->server->secMode |=
+ SECMODE_SIGN_REQUIRED;
+ if ((SecurityBlob2->NegotiateFlags &
cpu_to_le32(NTLMSSP_NEGOTIATE_SIGN)) && (sign_CIFS_PDUs))
- ses->server->secMode |=
+ ses->server->secMode |=
SECMODE_SIGN_ENABLED;
if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
@@ -2623,7 +2727,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
remaining_words =
(BCC(smb_buffer_response)
- 1) / 2;
- bcc_ptr++; /* Unicode strings must be word aligned */
+ /* Must word align unicode strings */
+ bcc_ptr++;
} else {
remaining_words =
BCC
@@ -2635,7 +2740,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
/* We look for obvious messed up bcc or strings in response so we do not go off
the end since (at least) WIN2K and Windows XP have a major bug in not null
terminating last Unicode string in response */
- if(ses->serverOS)
+ if (ses->serverOS)
kfree(ses->serverOS);
ses->serverOS =
kzalloc(2 * (len + 1), GFP_KERNEL);
@@ -2668,8 +2773,9 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
(2 * len)] = 0;
remaining_words -= len + 1;
if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
- /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
+ len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
+ /* last string not always null terminated
+ (e.g. for Windows XP & 2000) */
kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2 *
@@ -2707,7 +2813,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
if (((long) bcc_ptr + len) - (long)
pByteArea(smb_buffer_response)
<= BCC(smb_buffer_response)) {
- if(ses->serverOS)
+ if (ses->serverOS)
kfree(ses->serverOS);
ses->serverOS =
kzalloc(len + 1,
@@ -2734,18 +2840,20 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
ses->serverDomain =
kzalloc(len + 1,
GFP_KERNEL);
- strncpy(ses->serverDomain, bcc_ptr, len);
+ strncpy(ses->serverDomain,
+ bcc_ptr, len);
bcc_ptr += len;
bcc_ptr[0] = 0;
bcc_ptr++;
} else
cFYI(1,
- ("Variable field of length %d extends beyond end of smb",
+ ("field of length %d "
+ "extends beyond end of smb",
len));
}
} else {
- cERROR(1,
- (" Security Blob Length extends beyond end of SMB"));
+ cERROR(1, ("Security Blob Length extends beyond"
+ " end of SMB"));
}
} else {
cERROR(1, ("No session structure passed in."));
@@ -2784,7 +2892,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
__u16 count;
cFYI(1, ("In NTLMSSPSessSetup (Authenticate)"));
- if(ses == NULL)
+ if (ses == NULL)
return -EINVAL;
user = ses->userName;
domain = ses->domainName;
@@ -2809,7 +2917,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
pSMB->req.hdr.Uid = ses->Suid;
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
@@ -2833,13 +2941,13 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
strncpy(SecurityBlob->Signature, NTLMSSP_SIGNATURE, 8);
SecurityBlob->MessageType = NtLmAuthenticate;
bcc_ptr += SecurityBlobLength;
- negotiate_flags =
+ negotiate_flags =
NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_TARGET_INFO |
0x80000000 | NTLMSSP_NEGOTIATE_128;
- if(sign_CIFS_PDUs)
+ if (sign_CIFS_PDUs)
negotiate_flags |= /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN |*/ NTLMSSP_NEGOTIATE_SIGN;
- if(ntlmv2_flag)
+ if (ntlmv2_flag)
negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
/* setup pointers to domain name and workstation name */
@@ -2903,13 +3011,17 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
cpu_to_le16(len);
}
- /* SecurityBlob->WorkstationName.Length = cifs_strtoUCS((__le16 *) bcc_ptr, "AMACHINE",64, nls_codepage);
+ /* SecurityBlob->WorkstationName.Length =
+ cifs_strtoUCS((__le16 *) bcc_ptr, "AMACHINE",64, nls_codepage);
SecurityBlob->WorkstationName.Length *= 2;
- SecurityBlob->WorkstationName.MaximumLength = cpu_to_le16(SecurityBlob->WorkstationName.Length);
- SecurityBlob->WorkstationName.Buffer = cpu_to_le32(SecurityBlobLength);
+ SecurityBlob->WorkstationName.MaximumLength =
+ cpu_to_le16(SecurityBlob->WorkstationName.Length);
+ SecurityBlob->WorkstationName.Buffer =
+ cpu_to_le32(SecurityBlobLength);
bcc_ptr += SecurityBlob->WorkstationName.Length;
SecurityBlobLength += SecurityBlob->WorkstationName.Length;
- SecurityBlob->WorkstationName.Length = cpu_to_le16(SecurityBlob->WorkstationName.Length); */
+ SecurityBlob->WorkstationName.Length =
+ cpu_to_le16(SecurityBlob->WorkstationName.Length); */
if ((long) bcc_ptr % 2) {
*bcc_ptr = 0;
@@ -2995,17 +3107,20 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
__u16 blob_len =
le16_to_cpu(pSMBr->resp.SecurityBlobLength);
if (action & GUEST_LOGIN)
- cFYI(1, (" Guest login")); /* BB do we want to set anything in SesInfo struct ? */
-/* if(SecurityBlob2->MessageType != NtLm??){
- cFYI("Unexpected message type on auth response is %d "));
- } */
+ cFYI(1, (" Guest login")); /* BB Should we set anything
+ in SesInfo struct? */
+/* if (SecurityBlob2->MessageType != NtLm??) {
+ cFYI("Unexpected message type on auth response is %d"));
+ } */
+
if (ses) {
cFYI(1,
- ("Does UID on challenge %d match auth response UID %d ",
+ ("Check challenge UID %d vs auth response UID %d",
ses->Suid, smb_buffer_response->Uid));
- ses->Suid = smb_buffer_response->Uid; /* UID left in wire format */
- bcc_ptr = pByteArea(smb_buffer_response);
- /* response can have either 3 or 4 word count - Samba sends 3 */
+ /* UID left in wire format */
+ ses->Suid = smb_buffer_response->Uid;
+ bcc_ptr = pByteArea(smb_buffer_response);
+ /* response can have either 3 or 4 word count - Samba sends 3 */
if ((pSMBr->resp.hdr.WordCount == 3)
|| ((pSMBr->resp.hdr.WordCount == 4)
&& (blob_len <
@@ -3035,7 +3150,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
/* We look for obvious messed up bcc or strings in response so we do not go off
the end since (at least) WIN2K and Windows XP have a major bug in not null
terminating last Unicode string in response */
- if(ses->serverOS)
+ if (ses->serverOS)
kfree(ses->serverOS);
ses->serverOS =
kzalloc(2 * (len + 1), GFP_KERNEL);
@@ -3067,9 +3182,9 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
ses->serverNOS[1+(2*len)] = 0;
remaining_words -= len + 1;
if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
+ len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
/* last string not always null terminated (e.g. for Windows XP & 2000) */
- if(ses->serverDomain)
+ if (ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2 *
@@ -3097,12 +3212,12 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
= 0;
} /* else no more room so create dummy domain string */
else {
- if(ses->serverDomain)
+ if (ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain = kzalloc(2,GFP_KERNEL);
}
} else { /* no room so create dummy domain and NOS string */
- if(ses->serverDomain)
+ if (ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain = kzalloc(2, GFP_KERNEL);
kfree(ses->serverNOS);
@@ -3110,10 +3225,10 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
}
} else { /* ASCII */
len = strnlen(bcc_ptr, 1024);
- if (((long) bcc_ptr + len) -
- (long) pByteArea(smb_buffer_response)
- <= BCC(smb_buffer_response)) {
- if(ses->serverOS)
+ if (((long) bcc_ptr + len) -
+ (long) pByteArea(smb_buffer_response)
+ <= BCC(smb_buffer_response)) {
+ if (ses->serverOS)
kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
strncpy(ses->serverOS,bcc_ptr, len);
@@ -3124,28 +3239,35 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
len = strnlen(bcc_ptr, 1024);
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
- strncpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS = kzalloc(len+1,
+ GFP_KERNEL);
+ strncpy(ses->serverNOS,
+ bcc_ptr, len);
bcc_ptr += len;
bcc_ptr[0] = 0;
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
+ if (ses->serverDomain)
kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(len+1,GFP_KERNEL);
- strncpy(ses->serverDomain, bcc_ptr, len);
+ ses->serverDomain =
+ kzalloc(len+1,
+ GFP_KERNEL);
+ strncpy(ses->serverDomain,
+ bcc_ptr, len);
bcc_ptr += len;
bcc_ptr[0] = 0;
bcc_ptr++;
} else
cFYI(1,
- ("Variable field of length %d extends beyond end of smb ",
+ ("field of length %d "
+ "extends beyond end of smb ",
len));
}
} else {
cERROR(1,
- (" Security Blob Length extends beyond end of SMB"));
+ (" Security Blob extends beyond end "
+ "of SMB"));
}
} else {
cERROR(1, ("No session structure passed in."));
@@ -3197,7 +3319,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if((ses->server->secMode) & SECMODE_USER) {
+ if ((ses->server->secMode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -3211,7 +3333,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- if((extended_security & CIFSSEC_MAY_LANMAN) &&
+ if ((extended_security & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(ses, bcc_ptr);
else
@@ -3221,14 +3343,14 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr);
bcc_ptr += CIFS_SESS_KEY_SIZE;
- if(ses->capabilities & CAP_UNICODE) {
+ if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
bcc_ptr++;
}
}
- if(ses->server->secMode &
+ if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -3241,8 +3363,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if (ses->capabilities & CAP_UNICODE) {
smb_buffer->Flags2 |= SMBFLG2_UNICODE;
length =
- cifs_strtoUCS((__le16 *) bcc_ptr, tree,
- 6 /* max utf8 char length in bytes */ *
+ cifs_strtoUCS((__le16 *) bcc_ptr, tree,
+ 6 /* max utf8 char length in bytes */ *
(/* server len*/ + 256 /* share len */), nls_codepage);
bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
bcc_ptr += 2; /* skip trailing null */
@@ -3266,8 +3388,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
length = strnlen(bcc_ptr, BCC(smb_buffer_response) - 2);
- /* skip service field (NB: this field is always ASCII) */
- bcc_ptr += length + 1;
+ /* skip service field (NB: this field is always ASCII) */
+ bcc_ptr += length + 1;
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
length = UniStrnlen((wchar_t *) bcc_ptr, 512);
@@ -3285,7 +3407,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr[1] = 0;
bcc_ptr += 2;
}
- /* else do not bother copying these informational fields */
+ /* else do not bother copying these information fields*/
} else {
length = strnlen(bcc_ptr, 1024);
if ((bcc_ptr + length) -
@@ -3297,9 +3419,9 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
strncpy(tcon->nativeFileSystem, bcc_ptr,
length);
}
- /* else do not bother copying these informational fields */
+ /* else do not bother copying these information fields*/
}
- if((smb_buffer_response->WordCount == 3) ||
+ if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
/* field is in same location */
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
@@ -3307,7 +3429,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->Flags = 0;
cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
} else if ((rc == 0) && tcon == NULL) {
- /* all we need to save for IPC$ connection */
+ /* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
}
@@ -3323,7 +3445,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
int xid;
struct cifsSesInfo *ses = NULL;
struct task_struct *cifsd_task;
- char * tmp;
+ char *tmp;
xid = GetXid();
@@ -3344,9 +3466,9 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
FreeXid(xid);
return 0;
} else if (rc == -ESHUTDOWN) {
- cFYI(1,("Waking up socket by sending it signal"));
+ cFYI(1, ("Waking up socket by sending signal"));
if (cifsd_task) {
- send_sig(SIGKILL,cifsd_task,1);
+ force_sig(SIGKILL, cifsd_task);
kthread_stop(cifsd_task);
}
rc = 0;
@@ -3355,7 +3477,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
} else
cFYI(1, ("No session or bad tcon"));
}
-
+
cifs_sb->tcon = NULL;
tmp = cifs_sb->prepath;
cifs_sb->prepathlen = 0;
@@ -3367,11 +3489,11 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
sesInfoFree(ses);
FreeXid(xid);
- return rc; /* BB check if we should always return zero here */
-}
+ return rc; /* BB check if we should always return zero here */
+}
int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
- struct nls_table * nls_info)
+ struct nls_table *nls_info)
{
int rc = 0;
char ntlm_session_key[CIFS_SESS_KEY_SIZE];
@@ -3379,16 +3501,16 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
int first_time = 0;
/* what if server changes its buffer size after dropping the session? */
- if(pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
+ if (pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
rc = CIFSSMBNegotiate(xid, pSesInfo);
- if(rc == -EAGAIN) /* retry only once on 1st time connection */ {
+ if (rc == -EAGAIN) /* retry only once on 1st time connection */ {
rc = CIFSSMBNegotiate(xid, pSesInfo);
- if(rc == -EAGAIN)
+ if (rc == -EAGAIN)
rc = -EHOSTDOWN;
}
- if(rc == 0) {
+ if (rc == 0) {
spin_lock(&GlobalMid_Lock);
- if(pSesInfo->server->tcpStatus != CifsExiting)
+ if (pSesInfo->server->tcpStatus != CifsExiting)
pSesInfo->server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
@@ -3400,18 +3522,19 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
if (!rc) {
pSesInfo->flags = 0;
pSesInfo->capabilities = pSesInfo->server->capabilities;
- if(linuxExtEnabled == 0)
+ if (linuxExtEnabled == 0)
pSesInfo->capabilities &= (~CAP_UNIX);
/* pSesInfo->sequence_number = 0;*/
- cFYI(1,("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
+ cFYI(1,
+ ("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
pSesInfo->server->secMode,
pSesInfo->server->capabilities,
pSesInfo->server->timeAdj));
- if(experimEnabled < 2)
+ if (experimEnabled < 2)
rc = CIFS_SessSetup(xid, pSesInfo,
first_time, nls_info);
else if (extended_security
- && (pSesInfo->capabilities
+ && (pSesInfo->capabilities
& CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == NTLMSSP)) {
rc = -EOPNOTSUPP;
@@ -3424,21 +3547,22 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
&ntlmv2_flag,
nls_info);
if (!rc) {
- if(ntlmv2_flag) {
- char * v2_response;
- cFYI(1,("more secure NTLM ver2 hash"));
- if(CalcNTLMv2_partial_mac_key(pSesInfo,
+ if (ntlmv2_flag) {
+ char *v2_response;
+ cFYI(1, ("more secure NTLM ver2 hash"));
+ if (CalcNTLMv2_partial_mac_key(pSesInfo,
nls_info)) {
rc = -ENOMEM;
goto ss_err_exit;
} else
v2_response = kmalloc(16 + 64 /* blob */, GFP_KERNEL);
- if(v2_response) {
- CalcNTLMv2_response(pSesInfo,v2_response);
- /* if(first_time)
- cifs_calculate_ntlmv2_mac_key(
- pSesInfo->server->mac_signing_key,
- response, ntlm_session_key, */
+ if (v2_response) {
+ CalcNTLMv2_response(pSesInfo,
+ v2_response);
+ /* if (first_time)
+ cifs_calculate_ntlmv2_mac_key(
+ pSesInfo->server->mac_signing_key,
+ response, ntlm_session_key,*/
kfree(v2_response);
/* BB Put dummy sig in SessSetup PDU? */
} else {
@@ -3451,9 +3575,9 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
pSesInfo->server->cryptKey,
ntlm_session_key);
- if(first_time)
+ if (first_time)
cifs_calculate_mac_key(
- pSesInfo->server->mac_signing_key,
+ &pSesInfo->server->mac_signing_key,
ntlm_session_key,
pSesInfo->password);
}
@@ -3471,18 +3595,18 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
pSesInfo->server->cryptKey,
ntlm_session_key);
- if(first_time)
+ if (first_time)
cifs_calculate_mac_key(
- pSesInfo->server->mac_signing_key,
+ &pSesInfo->server->mac_signing_key,
ntlm_session_key, pSesInfo->password);
rc = CIFSSessSetup(xid, pSesInfo,
ntlm_session_key, nls_info);
}
if (rc) {
- cERROR(1,("Send error in SessSetup = %d",rc));
+ cERROR(1, ("Send error in SessSetup = %d", rc));
} else {
- cFYI(1,("CIFS Session Established successfully"));
+ cFYI(1, ("CIFS Session Established successfully"));
pSesInfo->status = CifsGood;
}
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 8e86aac..4830acc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -135,10 +135,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
- FILE_ALL_INFO * buf = NULL;
+ FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
- struct cifsFileInfo * pCifsFile = NULL;
- struct cifsInodeInfo * pCifsInode;
+ struct cifsFileInfo *pCifsFile = NULL;
+ struct cifsInodeInfo *pCifsInode;
int disposition = FILE_OVERWRITE_IF;
int write_only = FALSE;
@@ -207,8 +207,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
} else {
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
- if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
- (oplock & CIFS_CREATE_ACTION)) {
+ if ((pTcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
mode &= ~current->fs->umask;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
@@ -235,8 +234,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
/* Could set r/o dos attribute if mode & 0222 == 0 */
}
- /* BB server might mask mode so we have to query for Unix case*/
- if (pTcon->ses->capabilities & CAP_UNIX)
+ /* server might mask mode so we have to query for it */
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
else {
@@ -264,7 +263,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
direntry->d_op = &cifs_dentry_ops;
d_instantiate(direntry, newinode);
}
- if ((nd->flags & LOOKUP_OPEN) == FALSE) {
+ if ((nd == NULL /* nfsd case - nfs srv does not set nd */) ||
+ ((nd->flags & LOOKUP_OPEN) == FALSE)) {
/* mknod case - do not leave file open */
CIFSSMBClose(xid, pTcon, fileHandle);
} else if (newinode) {
@@ -323,7 +323,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
- struct inode * newinode = NULL;
+ struct inode *newinode = NULL;
if (!old_valid_dev(device_number))
return -EINVAL;
@@ -336,7 +336,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL)
rc = -ENOMEM;
- else if (pTcon->ses->capabilities & CAP_UNIX) {
+ else if (pTcon->unix_ext) {
mode &= ~current->fs->umask;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
@@ -490,7 +490,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
cFYI(1,
(" Full path: %s inode = 0x%p", full_path, direntry->d_inode));
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newInode, full_path,
parent_dir_inode->i_sb, xid);
else
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 96df1d5..893fd0a 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -5,7 +5,7 @@
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Common Internet FileSystem (CIFS) client
- *
+ *
* Operations related to support for exporting files via NFSD
*
* This library is free software; you can redistribute it and/or modify
@@ -22,32 +22,45 @@
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
- /*
+
+ /*
* See Documentation/filesystems/Exporting
* and examples in fs/exportfs
+ *
+ * Since cifs is a network file system, an "fsid" must be included for
+ * any nfs exports file entries which refer to cifs paths. In addition
+ * the cifs filesystem must be mounted with the "serverino" option (i.e. use stable
+ * server inode numbers instead of locally generated temporary ones).
+ * Although cifs inodes do not use generation numbers (have generation number
+ * of zero) - the inode number alone should be good enough for simple cases
+ * in which users want to export cifs shares with NFS. The decode and encode
+ * could be improved by using a new routine which expects 64 bit inode numbers
+ * instead of the default 32 bit routines in fs/exportfs
+ *
*/
#include <linux/fs.h>
#include <linux/exportfs.h>
-
+#include "cifsglob.h"
+#include "cifs_debug.h"
+
#ifdef CONFIG_CIFS_EXPERIMENTAL
-
static struct dentry *cifs_get_parent(struct dentry *dentry)
{
- /* BB need to add code here eventually to enable export via NFSD */
- return ERR_PTR(-EACCES);
+ /* BB need to add code here eventually to enable export via NFSD */
+ cFYI(1, ("get parent for %p", dentry));
+ return ERR_PTR(-EACCES);
}
-
+
struct export_operations cifs_export_ops = {
- .get_parent = cifs_get_parent,
-/* Following five export operations are unneeded so far and can default */
-/* .get_dentry =
- .get_name =
- .find_exported_dentry =
- .decode_fh =
- .encode_fs = */
- };
-
+ .get_parent = cifs_get_parent,
+/* Following five export operations are unneeded so far and can default:
+ .get_dentry =
+ .get_name =
+ .find_exported_dentry =
+ .decode_fh =
+ .encode_fs = */
+};
+
#endif /* EXPERIMENTAL */
-
+
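The comment block added at the top of export.c spells out what an administrator has to do before a cifs mount can be re-exported by nfsd: give the export an explicit fsid (cifs provides no stable device number for NFS to derive one from) and mount with the "serverino" option so inode numbers come from the server instead of being generated locally. A rough illustration of those two pieces, with server, share, mount point and fsid values that are placeholders rather than anything taken from this patch:

    # mount the share using server-generated inode numbers (hypothetical names)
    mount -t cifs //server/share /mnt/cifs -o username=someuser,serverino

    # /etc/exports entry for the re-exported path; the explicit fsid is required
    /mnt/cifs  *(rw,fsid=101,no_subtree_check)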
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 8e375bb..995474c 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -66,7 +66,7 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
return cifs_ntfy_flags;
}
-int cifs_dir_notify(struct file * file, unsigned long arg)
+int cifs_dir_notify(struct file *file, unsigned long arg)
{
int xid;
int rc = -EINVAL;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 94d5b49..894b1f7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2,8 +2,8 @@
* fs/cifs/file.c
*
* vfs operations that deal with files
- *
- * Copyright (C) International Business Machines Corp., 2002,2003
+ *
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
@@ -45,7 +45,7 @@ static inline struct cifsFileInfo *cifs_init_private(
{
memset(private_data, 0, sizeof(struct cifsFileInfo));
private_data->netfid = netfid;
- private_data->pid = current->tgid;
+ private_data->pid = current->tgid;
init_MUTEX(&private_data->fh_sem);
mutex_init(&private_data->lock_mutex);
INIT_LIST_HEAD(&private_data->llist);
@@ -57,7 +57,7 @@ static inline struct cifsFileInfo *cifs_init_private(
does not tell us which handle the write is for so there can
be a close (overlapping with write) of the filehandle that
cifs_writepages chose to use */
- atomic_set(&private_data->wrtPending,0);
+ atomic_set(&private_data->wrtPending, 0);
return private_data;
}
@@ -105,7 +105,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
in the list so we do not have to walk the
list to search for one in prepare_write */
if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
- list_add_tail(&pCifsFile->flist,
+ list_add_tail(&pCifsFile->flist,
&pCifsInode->openFileList);
} else {
list_add(&pCifsFile->flist,
@@ -138,7 +138,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
}
client_can_cache:
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
full_path, inode->i_sb, xid);
else
@@ -189,7 +189,7 @@ int cifs_open(struct inode *inode, struct file *file)
/* needed for writepage */
pCifsFile->pfile = file;
-
+
file->private_data = pCifsFile;
break;
}
@@ -212,15 +212,15 @@ int cifs_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
+ cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
inode, file->f_flags, full_path));
desiredAccess = cifs_convert_flags(file->f_flags);
/*********************************************************************
* open flag mapping table:
- *
+ *
* POSIX Flag CIFS Disposition
- * ---------- ----------------
+ * ---------- ----------------
* O_CREAT FILE_OPEN_IF
* O_CREAT | O_EXCL FILE_CREATE
* O_CREAT | O_TRUNC FILE_OVERWRITE_IF
@@ -228,12 +228,12 @@ int cifs_open(struct inode *inode, struct file *file)
* none of the above FILE_OPEN
*
* Note that there is not a direct match between disposition
- * FILE_SUPERSEDE (ie create whether or not file exists although
+ * FILE_SUPERSEDE (ie create whether or not file exists although
* O_CREAT | O_TRUNC is similar but truncates the existing
* file rather than creating a new file as FILE_SUPERSEDE does
* (which uses the attributes / metadata passed in on open call)
*?
- *? O_SYNC is a reasonable match to CIFS writethrough flag
+ *? O_SYNC is a reasonable match to CIFS writethrough flag
*? and the read write flags match reasonably. O_LARGEFILE
*? is irrelevant because largefile support is always used
*? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
@@ -253,8 +253,8 @@ int cifs_open(struct inode *inode, struct file *file)
and calling get_inode_info with returned buf (at least helps
non-Unix server case) */
- /* BB we can not do this if this is the second open of a file
- and the first handle has writebehind data, we might be
+ /* BB we can not do this if this is the second open of a file
+ and the first handle has writebehind data, we might be
able to simply do a filemap_fdatawrite/filemap_fdatawait first */
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (!buf) {
@@ -263,7 +263,7 @@ int cifs_open(struct inode *inode, struct file *file)
}
if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -300,15 +300,15 @@ int cifs_open(struct inode *inode, struct file *file)
write_unlock(&GlobalSMBSeslock);
}
- if (oplock & CIFS_CREATE_ACTION) {
+ if (oplock & CIFS_CREATE_ACTION) {
/* time to set mode which we can not set earlier due to
problems creating new read-only files */
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+ if (pTcon->unix_ext) {
CIFSSMBUnixSetPerms(xid, pTcon, full_path,
inode->i_mode,
(__u64)-1, (__u64)-1, 0 /* dev */,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* BB implement via Windows security descriptors eg
@@ -345,7 +345,7 @@ static int cifs_reopen_file(struct file *file, int can_flush)
struct cifsTconInfo *pTcon;
struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
- struct inode * inode;
+ struct inode *inode;
char *full_path = NULL;
int desiredAccess;
int disposition = FILE_OPEN;
@@ -372,13 +372,13 @@ static int cifs_reopen_file(struct file *file, int can_flush)
}
inode = file->f_path.dentry->d_inode;
- if(inode == NULL) {
+ if (inode == NULL) {
cERROR(1, ("inode not valid"));
dump_stack();
rc = -EBADF;
goto reopen_error_exit;
}
-
+
cifs_sb = CIFS_SB(inode->i_sb);
pTcon = cifs_sb->tcon;
@@ -396,7 +396,7 @@ reopen_error_exit:
}
cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
- inode, file->f_flags,full_path));
+ inode, file->f_flags, full_path));
desiredAccess = cifs_convert_flags(file->f_flags);
if (oplockEnabled)
@@ -405,14 +405,14 @@ reopen_error_exit:
oplock = FALSE;
/* Can not refresh inode by passing in file_info buf to be returned
- by SMBOpen and then calling get_inode_info with returned buf
- since file might have write behind data that needs to be flushed
+ by SMBOpen and then calling get_inode_info with returned buf
+ since file might have write behind data that needs to be flushed
and server version of file size can be stale. If we knew for sure
that inode was not dirty locally we could do this */
rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
- cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
up(&pCifsFile->fh_sem);
@@ -430,7 +430,7 @@ reopen_error_exit:
go to server to get inode info */
pCifsInode->clientCanCacheAll = FALSE;
pCifsInode->clientCanCacheRead = FALSE;
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode,
full_path, inode->i_sb, xid);
else
@@ -486,23 +486,24 @@ int cifs_close(struct inode *inode, struct file *file)
already closed */
if (pTcon->tidStatus != CifsNeedReconnect) {
int timeout = 2;
- while((atomic_read(&pSMBFile->wrtPending) != 0)
+ while ((atomic_read(&pSMBFile->wrtPending) != 0)
&& (timeout < 1000) ) {
/* Give write a better chance to get to
server ahead of the close. We do not
want to add a wait_q here as it would
increase the memory utilization as
the struct would be in each open file,
- but this should give enough time to
+ but this should give enough time to
clear the socket */
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1,("close delay, write pending"));
+ cFYI(1, ("close delay, write pending"));
#endif /* DEBUG2 */
msleep(timeout);
timeout *= 4;
}
- if(atomic_read(&pSMBFile->wrtPending))
- cERROR(1,("close with pending writes"));
+ if (atomic_read(&pSMBFile->wrtPending))
+ cERROR(1,
+ ("close with pending writes"));
rc = CIFSSMBClose(xid, pTcon,
pSMBFile->netfid);
}
@@ -534,7 +535,7 @@ int cifs_close(struct inode *inode, struct file *file)
CIFS_I(inode)->clientCanCacheRead = FALSE;
CIFS_I(inode)->clientCanCacheAll = FALSE;
}
- if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
+ if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
rc = CIFS_I(inode)->write_behind_rc;
FreeXid(xid);
return rc;
@@ -554,7 +555,8 @@ int cifs_closedir(struct inode *inode, struct file *file)
if (pCFileStruct) {
struct cifsTconInfo *pTcon;
- struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ struct cifs_sb_info *cifs_sb =
+ CIFS_SB(file->f_path.dentry->d_sb);
pTcon = cifs_sb->tcon;
@@ -572,7 +574,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
if (ptmp) {
cFYI(1, ("closedir free smb buf in srch struct"));
pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
- if(pCFileStruct->srch_inf.smallBuf)
+ if (pCFileStruct->srch_inf.smallBuf)
cifs_small_buf_release(ptmp);
else
cifs_buf_release(ptmp);
@@ -594,7 +596,8 @@ int cifs_closedir(struct inode *inode, struct file *file)
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
__u64 offset, __u8 lockType)
{
- struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
+ struct cifsLockInfo *li =
+ kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
if (li == NULL)
return -ENOMEM;
li->offset = offset;
@@ -625,8 +628,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
cFYI(1, ("Lock parm: 0x%x flockflags: "
"0x%x flocktype: 0x%x start: %lld end: %lld",
- cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
- pfLock->fl_end));
+ cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
+ pfLock->fl_end));
if (pfLock->fl_flags & FL_POSIX)
cFYI(1, ("Posix"));
@@ -641,7 +644,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
"not implemented yet"));
if (pfLock->fl_flags & FL_LEASE)
cFYI(1, ("Lease on file - not implemented yet"));
- if (pfLock->fl_flags &
+ if (pfLock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
@@ -683,9 +686,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
account for negative length which we can not accept over the
wire */
if (IS_GETLK(cmd)) {
- if(posix_locking) {
+ if (posix_locking) {
int posix_lock_type;
- if(lockType & LOCKING_ANDX_SHARED_LOCK)
+ if (lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
@@ -700,7 +703,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
0, 1, lockType, 0 /* wait flag */ );
if (rc == 0) {
- rc = CIFSSMBLock(xid, pTcon, netfid, length,
+ rc = CIFSSMBLock(xid, pTcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
0 /* wait flag */ );
@@ -729,22 +732,24 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
if (posix_locking) {
int posix_lock_type;
- if(lockType & LOCKING_ANDX_SHARED_LOCK)
+ if (lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
-
- if(numUnlock == 1)
+
+ if (numUnlock == 1)
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
length, pfLock,
posix_lock_type, wait_flag);
} else {
- struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
+ struct cifsFileInfo *fid =
+ (struct cifsFileInfo *)file->private_data;
if (numLock) {
- rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+ rc = CIFSSMBLock(xid, pTcon, netfid, length,
+ pfLock->fl_start,
0, numLock, lockType, wait_flag);
if (rc == 0) {
@@ -763,7 +768,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
if (pfLock->fl_start <= li->offset &&
length >= li->length) {
- stored_rc = CIFSSMBLock(xid, pTcon, netfid,
+ stored_rc = CIFSSMBLock(xid, pTcon,
+ netfid,
li->length, li->offset,
1, 0, li->type, FALSE);
if (stored_rc)
@@ -805,7 +811,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
if (file->private_data == NULL)
return -EBADF;
open_file = (struct cifsFileInfo *) file->private_data;
-
+
xid = GetXid();
if (*poffset > file->f_path.dentry->d_inode->i_size)
@@ -824,7 +830,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
and blocked, and the file has been freed on us while
we blocked so return what we managed to write */
return total_written;
- }
+ }
if (open_file->closePend) {
FreeXid(xid);
if (total_written)
@@ -867,8 +873,8 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
/* since the write may have blocked check these pointers again */
if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
struct inode *inode = file->f_path.dentry->d_inode;
-/* Do not update local mtime - server will set its actual value on write
- * inode->i_ctime = inode->i_mtime =
+/* Do not update local mtime - server will set its actual value on write
+ * inode->i_ctime = inode->i_mtime =
* current_fs_time(inode->i_sb);*/
if (total_written > 0) {
spin_lock(&inode->i_lock);
@@ -877,7 +883,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
*poffset);
spin_unlock(&inode->i_lock);
}
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ mark_inode_dirty_sync(file->f_path.dentry->d_inode);
}
FreeXid(xid);
return total_written;
@@ -898,13 +904,13 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
pTcon = cifs_sb->tcon;
- cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
+ cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
*poffset, file->f_path.dentry->d_name.name));
if (file->private_data == NULL)
return -EBADF;
open_file = (struct cifsFileInfo *)file->private_data;
-
+
xid = GetXid();
if (*poffset > file->f_path.dentry->d_inode->i_size)
@@ -921,10 +927,10 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
FreeXid(xid);
/* if we have gotten here we have written some data
and blocked, and the file has been freed on us
- while we blocked so return what we managed to
+ while we blocked so return what we managed to
write */
return total_written;
- }
+ }
if (open_file->closePend) {
FreeXid(xid);
if (total_written)
@@ -935,14 +941,14 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
- reopen_file not to flush data to
+ reopen_file not to flush data to
server now */
rc = cifs_reopen_file(file, FALSE);
if (rc != 0)
break;
}
- if(experimEnabled || (pTcon->ses->server &&
- ((pTcon->ses->server->secMode &
+ if (experimEnabled || (pTcon->ses->server &&
+ ((pTcon->ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
== 0))) {
struct kvec iov[2];
@@ -976,7 +982,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
}
} else
*poffset += bytes_written;
- long_op = FALSE; /* subsequent writes fast -
+ long_op = FALSE; /* subsequent writes fast -
15 seconds is plenty */
}
@@ -1009,8 +1015,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
the VFS or MM) should not happen but we had reports of an oops (due to
it being zero) during stress testcases so we need to check for it */
- if(cifs_inode == NULL) {
- cERROR(1,("Null inode passed to cifs_writeable_file"));
+ if (cifs_inode == NULL) {
+ cERROR(1, ("Null inode passed to cifs_writeable_file"));
dump_stack();
return NULL;
}
@@ -1024,13 +1030,14 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
(open_file->pfile->f_flags & O_WRONLY))) {
atomic_inc(&open_file->wrtPending);
read_unlock(&GlobalSMBSeslock);
- if((open_file->invalidHandle) &&
+ if ((open_file->invalidHandle) &&
(!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
rc = cifs_reopen_file(open_file->pfile, FALSE);
/* if it fails, try another handle - might be */
/* dangerous to hold up writepages with retry */
- if(rc) {
- cFYI(1,("failed on reopen file in wp"));
+ if (rc) {
+ cFYI(1,
+ ("failed on reopen file in wp"));
read_lock(&GlobalSMBSeslock);
/* can not use this handle, no write
pending on this one after all */
@@ -1082,7 +1089,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
/* check to make sure that we are not extending the file */
if (mapping->host->i_size - offset < (loff_t)to)
- to = (unsigned)(mapping->host->i_size - offset);
+ to = (unsigned)(mapping->host->i_size - offset);
open_file = find_writable_file(CIFS_I(mapping->host));
if (open_file) {
@@ -1116,8 +1123,8 @@ static int cifs_writepages(struct address_space *mapping,
int done = 0;
pgoff_t end;
pgoff_t index;
- int range_whole = 0;
- struct kvec * iov;
+ int range_whole = 0;
+ struct kvec *iov;
int len;
int n_iov = 0;
pgoff_t next;
@@ -1131,7 +1138,7 @@ static int cifs_writepages(struct address_space *mapping,
int xid;
cifs_sb = CIFS_SB(mapping->host->i_sb);
-
+
/*
* If wsize is smaller that the page cache size, default to writing
* one page at a time via cifs_writepage
@@ -1139,14 +1146,14 @@ static int cifs_writepages(struct address_space *mapping,
if (cifs_sb->wsize < PAGE_CACHE_SIZE)
return generic_writepages(mapping, wbc);
- if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
- if(cifs_sb->tcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- if(!experimEnabled)
+ if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
+ if (cifs_sb->tcon->ses->server->secMode &
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (!experimEnabled)
return generic_writepages(mapping, wbc);
iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
- if(iov == NULL)
+ if (iov == NULL)
return generic_writepages(mapping, wbc);
@@ -1279,7 +1286,7 @@ retry:
1);
atomic_dec(&open_file->wrtPending);
if (rc || bytes_written < bytes_to_write) {
- cERROR(1,("Write2 ret %d, written = %d",
+ cERROR(1, ("Write2 ret %d, wrote %d",
rc, bytes_written));
/* BB what if continued retry is
requested via mount flags? */
@@ -1295,8 +1302,8 @@ retry:
success rc but too little data written? */
/* BB investigate retry logic on temporary
server crash cases and how recovery works
- when page marked as error */
- if(rc)
+ when page marked as error */
+ if (rc)
SetPageError(page);
kunmap(page);
unlock_page(page);
@@ -1326,7 +1333,7 @@ retry:
return rc;
}
-static int cifs_writepage(struct page* page, struct writeback_control *wbc)
+static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc = -EFAULT;
int xid;
@@ -1334,7 +1341,7 @@ static int cifs_writepage(struct page* page, struct writeback_control *wbc)
xid = GetXid();
/* BB add check for wbc flags */
page_cache_get(page);
- if (!PageUptodate(page)) {
+ if (!PageUptodate(page)) {
cFYI(1, ("ppw - page not up to date"));
}
@@ -1348,7 +1355,7 @@ static int cifs_writepage(struct page* page, struct writeback_control *wbc)
* Just unlocking the page will cause the radix tree tag-bits
* to fail to update with the state of the page correctly.
*/
- set_page_writeback(page);
+ set_page_writeback(page);
rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
unlock_page(page);
@@ -1368,7 +1375,7 @@ static int cifs_commit_write(struct file *file, struct page *page,
char *page_data;
xid = GetXid();
- cFYI(1, ("commit write for page %p up to position %lld for %d",
+ cFYI(1, ("commit write for page %p up to position %lld for %d",
page, position, to));
spin_lock(&inode->i_lock);
if (position > inode->i_size) {
@@ -1396,7 +1403,7 @@ static int cifs_commit_write(struct file *file, struct page *page,
rc = 0;
/* else if (rc < 0) should we set writebehind rc? */
kunmap(page);
- } else {
+ } else {
set_page_dirty(page);
}
@@ -1412,9 +1419,9 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
xid = GetXid();
- cFYI(1, ("Sync file - name: %s datasync: 0x%x",
+ cFYI(1, ("Sync file - name: %s datasync: 0x%x",
dentry->d_name.name, datasync));
-
+
rc = filemap_fdatawrite(inode->i_mapping);
if (rc == 0)
CIFS_I(inode)->write_behind_rc = 0;
@@ -1438,7 +1445,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
if (!inode)
return; */
-/* fill in rpages then
+/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
@@ -1456,7 +1463,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
*/
int cifs_flush(struct file *file, fl_owner_t id)
{
- struct inode * inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
int rc = 0;
/* Rather than do the steps manually:
@@ -1471,8 +1478,8 @@ int cifs_flush(struct file *file, fl_owner_t id)
rc = filemap_fdatawrite(inode->i_mapping);
if (!rc) /* reset wb rc if we were able to write out dirty pages */
CIFS_I(inode)->write_behind_rc = 0;
-
- cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
+
+ cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
return rc;
}
@@ -1508,13 +1515,13 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
for (total_read = 0, current_offset = read_data;
read_size > total_read;
total_read += bytes_read, current_offset += bytes_read) {
- current_read_size = min_t(const int, read_size - total_read,
+ current_read_size = min_t(const int, read_size - total_read,
cifs_sb->rsize);
rc = -EAGAIN;
smb_read_data = NULL;
while (rc == -EAGAIN) {
int buf_type = CIFS_NO_BUFFER;
- if ((open_file->invalidHandle) &&
+ if ((open_file->invalidHandle) &&
(!open_file->closePend)) {
rc = cifs_reopen_file(file, TRUE);
if (rc != 0)
@@ -1535,9 +1542,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
rc = -EFAULT;
}
- if(buf_type == CIFS_SMALL_BUFFER)
+ if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
- else if(buf_type == CIFS_LARGE_BUFFER)
+ else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
}
@@ -1586,21 +1593,21 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, ("attempting read on write only file instance"));
- for (total_read = 0, current_offset = read_data;
+ for (total_read = 0, current_offset = read_data;
read_size > total_read;
total_read += bytes_read, current_offset += bytes_read) {
current_read_size = min_t(const int, read_size - total_read,
cifs_sb->rsize);
/* For windows me and 9x we do not want to request more
than it negotiated since it will refuse the read then */
- if((pTcon->ses) &&
+ if ((pTcon->ses) &&
!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
current_read_size = min_t(const int, current_read_size,
pTcon->ses->server->maxBuf - 128);
}
rc = -EAGAIN;
while (rc == -EAGAIN) {
- if ((open_file->invalidHandle) &&
+ if ((open_file->invalidHandle) &&
(!open_file->closePend)) {
rc = cifs_reopen_file(file, TRUE);
if (rc != 0)
@@ -1646,7 +1653,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
-static void cifs_copy_cache_pages(struct address_space *mapping,
+static void cifs_copy_cache_pages(struct address_space *mapping,
struct list_head *pages, int bytes_read, char *data,
struct pagevec *plru_pvec)
{
@@ -1669,12 +1676,12 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
continue;
}
- target = kmap_atomic(page,KM_USER0);
+ target = kmap_atomic(page, KM_USER0);
if (PAGE_CACHE_SIZE > bytes_read) {
memcpy(target, data, bytes_read);
/* zero the tail end of this partial page */
- memset(target + bytes_read, 0,
+ memset(target + bytes_read, 0,
PAGE_CACHE_SIZE - bytes_read);
bytes_read = 0;
} else {
@@ -1703,7 +1710,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
int bytes_read = 0;
- unsigned int read_size,i;
+ unsigned int read_size, i;
char *smb_read_data = NULL;
struct smb_com_read_rsp *pSMBr;
struct pagevec lru_pvec;
@@ -1720,7 +1727,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
pTcon = cifs_sb->tcon;
pagevec_init(&lru_pvec, 0);
-
+#ifdef CONFIG_CIFS_DEBUG2
+ cFYI(1, ("rpages: num pages %d", num_pages));
+#endif
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
struct page *tmp_page;
@@ -1734,14 +1743,14 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* count adjacent pages that we will read into */
contig_pages = 0;
- expected_index =
+ expected_index =
list_entry(page_list->prev, struct page, lru)->index;
- list_for_each_entry_reverse(tmp_page,page_list,lru) {
+ list_for_each_entry_reverse(tmp_page, page_list, lru) {
if (tmp_page->index == expected_index) {
contig_pages++;
expected_index++;
} else
- break;
+ break;
}
if (contig_pages + i > num_pages)
contig_pages = num_pages - i;
@@ -1753,10 +1762,13 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* Read size needs to be in multiples of one page */
read_size = min_t(const unsigned int, read_size,
cifs_sb->rsize & PAGE_CACHE_MASK);
-
+#ifdef CONFIG_CIFS_DEBUG2
+ cFYI(1, ("rpages: read size 0x%x contiguous pages %d",
+ read_size, contig_pages));
+#endif
rc = -EAGAIN;
while (rc == -EAGAIN) {
- if ((open_file->invalidHandle) &&
+ if ((open_file->invalidHandle) &&
(!open_file->closePend)) {
rc = cifs_reopen_file(file, TRUE);
if (rc != 0)
@@ -1769,11 +1781,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
&bytes_read, &smb_read_data,
&buf_type);
/* BB more RC checks ? */
- if (rc== -EAGAIN) {
+ if (rc == -EAGAIN) {
if (smb_read_data) {
- if(buf_type == CIFS_SMALL_BUFFER)
+ if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
- else if(buf_type == CIFS_LARGE_BUFFER)
+ else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
}
@@ -1794,10 +1806,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
i++; /* account for partial page */
- /* server copy of file can have smaller size
+ /* server copy of file can have smaller size
than client */
- /* BB do we need to verify this common case ?
- this case is ok - if we are at server EOF
+ /* BB do we need to verify this common case ?
+ this case is ok - if we are at server EOF
we will hit it on next read */
/* break; */
@@ -1806,14 +1818,14 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
cFYI(1, ("No bytes read (%d) at offset %lld . "
"Cleaning remaining pages from readahead list",
bytes_read, offset));
- /* BB turn off caching and do new lookup on
+ /* BB turn off caching and do new lookup on
file size at server? */
break;
}
if (smb_read_data) {
- if(buf_type == CIFS_SMALL_BUFFER)
+ if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
- else if(buf_type == CIFS_LARGE_BUFFER)
+ else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
}
@@ -1824,12 +1836,12 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* need to free smb_read_data buf before exit */
if (smb_read_data) {
- if(buf_type == CIFS_SMALL_BUFFER)
+ if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
- else if(buf_type == CIFS_LARGE_BUFFER)
+ else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
- }
+ }
FreeXid(xid);
return rc;
@@ -1844,26 +1856,26 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
page_cache_get(page);
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
-
+
rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
-
+
if (rc < 0)
goto io_error;
else
- cFYI(1, ("Bytes read %d",rc));
-
+ cFYI(1, ("Bytes read %d", rc));
+
file->f_path.dentry->d_inode->i_atime =
current_fs_time(file->f_path.dentry->d_inode->i_sb);
-
+
if (PAGE_CACHE_SIZE > rc)
memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
flush_dcache_page(page);
SetPageUptodate(page);
rc = 0;
-
+
io_error:
- kunmap(page);
+ kunmap(page);
page_cache_release(page);
return rc;
}
@@ -1881,7 +1893,7 @@ static int cifs_readpage(struct file *file, struct page *page)
return -EBADF;
}
- cFYI(1, ("readpage %p at offset %d 0x%x\n",
+ cFYI(1, ("readpage %p at offset %d 0x%x\n",
page, (int)offset, (int)offset));
rc = cifs_readpage_worker(file, page, &offset);
@@ -1892,35 +1904,48 @@ static int cifs_readpage(struct file *file, struct page *page)
return rc;
}
+static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+{
+ struct cifsFileInfo *open_file;
+
+ read_lock(&GlobalSMBSeslock);
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ if (open_file->closePend)
+ continue;
+ if (open_file->pfile &&
+ ((open_file->pfile->f_flags & O_RDWR) ||
+ (open_file->pfile->f_flags & O_WRONLY))) {
+ read_unlock(&GlobalSMBSeslock);
+ return 1;
+ }
+ }
+ read_unlock(&GlobalSMBSeslock);
+ return 0;
+}
+
/* We do not want to update the file size from server for inodes
open for write - to avoid races with writepage extending
the file - in the future we could consider allowing
- refreshing the inode only on increases in the file size
+ refreshing the inode only on increases in the file size
but this is tricky to do without racing with writebehind
page caching in the current Linux kernel design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
- struct cifsFileInfo *open_file = NULL;
+ if (!cifsInode)
+ return 1;
- if (cifsInode)
- open_file = find_writable_file(cifsInode);
-
- if(open_file) {
+ if (is_inode_writable(cifsInode)) {
+ /* This inode is open for write at least once */
struct cifs_sb_info *cifs_sb;
- /* there is not actually a write pending so let
- this handle go free and allow it to
- be closable if needed */
- atomic_dec(&open_file->wrtPending);
-
cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
- /* since no page cache to corrupt on directio
+ /* since no page cache to corrupt on directio
we can change size safely */
return 1;
}
- if(i_size_read(&cifsInode->vfs_inode) < end_of_file)
+ if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
return 1;
return 0;
@@ -1935,7 +1960,7 @@ static int cifs_prepare_write(struct file *file, struct page *page,
loff_t i_size;
loff_t offset;
- cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
+ cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
if (PageUptodate(page))
return 0;
@@ -1955,14 +1980,7 @@ static int cifs_prepare_write(struct file *file, struct page *page,
* We don't need to read data beyond the end of the file.
* zero it, and set the page uptodate
*/
- void *kaddr = kmap_atomic(page, KM_USER0);
-
- if (from)
- memset(kaddr, 0, from);
- if (to < PAGE_CACHE_SIZE)
- memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ simple_prepare_write(file, page, from, to);
SetPageUptodate(page);
} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
/* might as well read a page, it is fast enough */
@@ -1974,8 +1992,8 @@ static int cifs_prepare_write(struct file *file, struct page *page,
this will be written out by commit_write so is fine */
}
- /* we do not need to pass errors back
- e.g. if we do not have read access to the file
+ /* we do not need to pass errors back
+ e.g. if we do not have read access to the file
because cifs_commit_write will do the right thing. -- shaggy */
return 0;
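Earlier in this file's diff, cifs_open() carries a comment table mapping POSIX open flags to CIFS create dispositions. Read as code, that table is a priority-ordered check of the O_CREAT, O_EXCL and O_TRUNC bits. Below is a minimal stand-alone sketch of that mapping in ordinary user-space C; the disposition names are local placeholders for illustration, not the kernel's actual constants or helpers:

    #include <fcntl.h>	/* O_CREAT, O_EXCL, O_TRUNC */

    /* Placeholder disposition names for illustration only -- the real values
       come from the CIFS protocol headers, not from this sketch. */
    enum create_disposition {
    	DISP_OPEN,		/* open existing file           */
    	DISP_CREATE,		/* create, fail if it exists    */
    	DISP_OPEN_IF,		/* open, create if missing      */
    	DISP_OVERWRITE,		/* truncate existing file       */
    	DISP_OVERWRITE_IF	/* truncate, create if missing  */
    };

    /* Same decision order as the comment table in cifs_open() above. */
    static enum create_disposition disposition_from_posix_flags(int flags)
    {
    	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
    		return DISP_CREATE;		/* O_CREAT | O_EXCL  */
    	if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
    		return DISP_OVERWRITE_IF;	/* O_CREAT | O_TRUNC */
    	if (flags & O_CREAT)
    		return DISP_OPEN_IF;		/* O_CREAT           */
    	if (flags & O_TRUNC)
    		return DISP_OVERWRITE;		/* O_TRUNC           */
    	return DISP_OPEN;			/* none of the above */
    }

The ordering matters: the combined O_CREAT|O_EXCL and O_CREAT|O_TRUNC cases must be tested before the bare O_CREAT case, exactly as the table lists them.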
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f0ff12b..dd41677 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -57,14 +57,14 @@ int cifs_get_inode_info_unix(struct inode **pinode,
if (tmp_path == NULL) {
return -ENOMEM;
}
- /* have to skip first of the double backslash of
+ /* have to skip first of the double backslash of
UNC name */
strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
strncat(tmp_path, search_path, MAX_PATHCONF);
rc = connect_to_dfs_path(xid, pTcon->ses,
/* treename + */ tmp_path,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(tmp_path);
@@ -81,7 +81,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* get new inode */
if (*pinode == NULL) {
*pinode = new_inode(sb);
- if (*pinode == NULL)
+ if (*pinode == NULL)
return -ENOMEM;
/* Is an i_ino of zero legal? */
/* Are there sanity checks we can use to ensure that
@@ -92,7 +92,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
} /* note ino incremented to unique num in new_inode */
if (sb->s_flags & MS_NOATIME)
(*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
-
+
insert_inode_hash(*pinode);
}
@@ -103,7 +103,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
cifsInfo->time = jiffies;
cFYI(1, ("New time %ld", cifsInfo->time));
/* this is ok to set on every inode revalidate */
- atomic_set(&cifsInfo->inUse,1);
+ atomic_set(&cifsInfo->inUse, 1);
inode->i_atime =
cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime));
@@ -114,8 +114,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange));
inode->i_mode = le64_to_cpu(findData.Permissions);
/* since we set the inode type below we need to mask off
- to avoid strange results if bits set above */
- inode->i_mode &= ~S_IFMT;
+ to avoid strange results if bits set above */
+ inode->i_mode &= ~S_IFMT;
if (type == UNIX_FILE) {
inode->i_mode |= S_IFREG;
} else if (type == UNIX_SYMLINK) {
@@ -137,9 +137,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
} else {
/* safest to call it a file if we do not know */
inode->i_mode |= S_IFREG;
- cFYI(1,("unknown type %d",type));
+ cFYI(1, ("unknown type %d", type));
}
-
+
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
inode->i_uid = cifs_sb->mnt_uid;
else
@@ -149,7 +149,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
inode->i_gid = cifs_sb->mnt_gid;
else
inode->i_gid = le64_to_cpu(findData.Gid);
-
+
inode->i_nlink = le64_to_cpu(findData.Nlinks);
spin_lock(&inode->i_lock);
@@ -183,17 +183,17 @@ int cifs_get_inode_info_unix(struct inode **pinode,
inode->i_op = &cifs_file_inode_ops;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- inode->i_fop =
+ inode->i_fop =
&cifs_file_direct_nobrl_ops;
else
inode->i_fop = &cifs_file_direct_ops;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_nobrl_ops;
- else /* not direct, send byte range locks */
+ else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
/* check if server can support readpages */
- if (pTcon->ses->server->maxBuf <
+ if (pTcon->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
@@ -215,7 +215,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
return rc;
}
-static int decode_sfu_inode(struct inode * inode, __u64 size,
+static int decode_sfu_inode(struct inode *inode, __u64 size,
const unsigned char *path,
struct cifs_sb_info *cifs_sb, int xid)
{
@@ -225,7 +225,7 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
struct cifsTconInfo *pTcon = cifs_sb->tcon;
char buf[24];
unsigned int bytes_read;
- char * pbuf;
+ char *pbuf;
pbuf = buf;
@@ -235,22 +235,22 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
} else if (size < 8) {
return -EINVAL; /* EOPNOTSUPP? */
}
-
+
rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
int buf_type = CIFS_NO_BUFFER;
/* Read header */
rc = CIFSSMBRead(xid, pTcon,
- netfid,
+ netfid,
24 /* length */, 0 /* offset */,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
if (memcmp("IntxBLK", pbuf, 8) == 0) {
- cFYI(1,("Block device"));
+ cFYI(1, ("Block device"));
inode->i_mode |= S_IFBLK;
if (bytes_read == 24) {
/* we have enough to decode dev num */
@@ -261,7 +261,7 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
inode->i_rdev = MKDEV(mjr, mnr);
}
} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
- cFYI(1,("Char device"));
+ cFYI(1, ("Char device"));
inode->i_mode |= S_IFCHR;
if (bytes_read == 24) {
/* we have enough to decode dev num */
@@ -270,27 +270,26 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
inode->i_rdev = MKDEV(mjr, mnr);
- }
+ }
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
- cFYI(1,("Symlink"));
+ cFYI(1, ("Symlink"));
inode->i_mode |= S_IFLNK;
} else {
inode->i_mode |= S_IFREG; /* file? */
- rc = -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
}
} else {
inode->i_mode |= S_IFREG; /* then it is a file */
- rc = -EOPNOTSUPP; /* or some unknown SFU type */
- }
+ rc = -EOPNOTSUPP; /* or some unknown SFU type */
+ }
CIFSSMBClose(xid, pTcon, netfid);
}
return rc;
-
}
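decode_sfu_inode() above recognizes "Services for Unix" style special files by the first bytes of the file itself: a short magic tag at offset 0 ("IntxBLK" for block devices, "IntxCHR" for character devices, "IntxLNK" for symlinks) followed, in the device cases, by two little-endian 64-bit words carrying the major and minor numbers at offsets 8 and 16. A stand-alone sketch of just that 24-byte layout, in ordinary user-space C rather than the kernel helpers used in the patch:

    #include <stdint.h>
    #include <string.h>

    enum sfu_type { SFU_UNKNOWN, SFU_BLOCK, SFU_CHAR, SFU_SYMLINK };

    /* Parsed view of the header that decode_sfu_inode() reads from the
       start of an SFU special file. */
    struct sfu_header {
    	enum sfu_type type;
    	uint64_t major;		/* meaningful for block/char devices only */
    	uint64_t minor;
    };

    /* Read a little-endian 64-bit value, matching the le64 fields on the wire. */
    static uint64_t get_le64(const unsigned char *p)
    {
    	uint64_t v = 0;
    	int i;

    	for (i = 7; i >= 0; i--)
    		v = (v << 8) | p[i];
    	return v;
    }

    /* buf holds the first bytes of the file, len is how many were read
       (the kernel code above asks for 24). */
    static struct sfu_header parse_sfu_header(const unsigned char *buf, size_t len)
    {
    	struct sfu_header h = { SFU_UNKNOWN, 0, 0 };

    	if (len < 8)
    		return h;
    	if (memcmp(buf, "IntxBLK", 8) == 0)
    		h.type = SFU_BLOCK;
    	else if (memcmp(buf, "IntxCHR", 8) == 0)
    		h.type = SFU_CHAR;
    	else if (memcmp(buf, "IntxLNK", 7) == 0)
    		h.type = SFU_SYMLINK;

    	/* device numbers are only present when the full 24-byte header is there */
    	if ((h.type == SFU_BLOCK || h.type == SFU_CHAR) && len >= 24) {
    		h.major = get_le64(buf + 8);
    		h.minor = get_le64(buf + 16);
    	}
    	return h;
    }

The kernel code reaches the same result with le64_to_cpu() on the buffer it read; the loop above just spells out the byte order for readers on any host endianness.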
#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */
-static int get_sfu_uid_mode(struct inode * inode,
+static int get_sfu_uid_mode(struct inode *inode,
const unsigned char *path,
struct cifs_sb_info *cifs_sb, int xid)
{
@@ -301,15 +300,15 @@ static int get_sfu_uid_mode(struct inode * inode,
rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
ea_value, 4 /* size of buf */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc < 0)
return (int)rc;
else if (rc > 3) {
mode = le32_to_cpu(*((__le32 *)ea_value));
- inode->i_mode &= ~SFBITS_MASK;
- cFYI(1,("special bits 0%o org mode 0%o", mode, inode->i_mode));
+ inode->i_mode &= ~SFBITS_MASK;
+ cFYI(1, ("special bits 0%o org mode 0%o", mode, inode->i_mode));
inode->i_mode = (mode & SFBITS_MASK) | inode->i_mode;
- cFYI(1,("special mode bits 0%o", mode));
+ cFYI(1, ("special mode bits 0%o", mode));
return 0;
} else {
return 0;
@@ -317,8 +316,6 @@ static int get_sfu_uid_mode(struct inode * inode,
#else
return -EOPNOTSUPP;
#endif
-
-
}
int cifs_get_inode_info(struct inode **pinode,
@@ -334,11 +331,11 @@ int cifs_get_inode_info(struct inode **pinode,
int adjustTZ = FALSE;
pTcon = cifs_sb->tcon;
- cFYI(1,("Getting info on %s", search_path));
+ cFYI(1, ("Getting info on %s", search_path));
if ((pfindData == NULL) && (*pinode != NULL)) {
if (CIFS_I(*pinode)->clientCanCacheRead) {
- cFYI(1,("No need to revalidate cached inode sizes"));
+ cFYI(1, ("No need to revalidate cached inode sizes"));
return rc;
}
}
@@ -359,12 +356,11 @@ int cifs_get_inode_info(struct inode **pinode,
failed at least once - set flag in tcon or mount */
if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
rc = SMBQueryInformation(xid, pTcon, search_path,
- pfindData, cifs_sb->local_nls,
+ pfindData, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
adjustTZ = TRUE;
}
-
}
/* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
if (rc) {
@@ -384,8 +380,8 @@ int cifs_get_inode_info(struct inode **pinode,
strncat(tmp_path, search_path, MAX_PATHCONF);
rc = connect_to_dfs_path(xid, pTcon->ses,
/* treename + */ tmp_path,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(tmp_path);
/* BB fix up inode etc. */
@@ -419,17 +415,17 @@ int cifs_get_inode_info(struct inode **pinode,
there Windows server or network appliances for which
IndexNumber field is not guaranteed unique? */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM){
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
int rc1 = 0;
__u64 inode_num;
- rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
- search_path, &inode_num,
+ rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
+ search_path, &inode_num,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc1) {
- cFYI(1,("GetSrvInodeNum rc %d", rc1));
+ cFYI(1, ("GetSrvInodeNum rc %d", rc1));
/* BB EOPNOSUPP disable SERVER_INUM? */
} else /* do we need cast or hash to ino? */
(*pinode)->i_ino = inode_num;
@@ -463,7 +459,7 @@ int cifs_get_inode_info(struct inode **pinode,
cFYI(0, ("Attributes came in as 0x%x", attr));
if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
- inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
+ inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
}
/* set default mode. will override for dirs below */
@@ -471,8 +467,9 @@ int cifs_get_inode_info(struct inode **pinode,
/* new inode, can safely set these fields */
inode->i_mode = cifs_sb->mnt_file_mode;
else /* since we set the inode type below we need to mask off
- to avoid strange results if type changes and both get orred in */
- inode->i_mode &= ~S_IFMT;
+ to avoid strange results if type changes and both
+ get orred in */
+ inode->i_mode &= ~S_IFMT;
/* if (attr & ATTR_REPARSE) */
/* We no longer handle these as symlinks because we could not
follow them due to the absolute path with drive letter */
@@ -490,13 +487,13 @@ int cifs_get_inode_info(struct inode **pinode,
/* BB Finish for SFU style symlinks and devices */
} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
(cifsInfo->cifsAttrs & ATTR_SYSTEM)) {
- if (decode_sfu_inode(inode,
+ if (decode_sfu_inode(inode,
le64_to_cpu(pfindData->EndOfFile),
search_path,
cifs_sb, xid)) {
- cFYI(1,("Unrecognized sfu inode type"));
+ cFYI(1, ("Unrecognized sfu inode type"));
}
- cFYI(1,("sfu mode 0%o",inode->i_mode));
+ cFYI(1, ("sfu mode 0%o", inode->i_mode));
} else {
inode->i_mode |= S_IFREG;
/* treat the dos attribute of read-only as read-only
@@ -512,12 +509,12 @@ int cifs_get_inode_info(struct inode **pinode,
/* BB add code here -
validate if device or weird share or device type? */
}
-
+
spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) {
/* can not safely shrink the file size here if the
client is writing to it due to potential races */
- i_size_write(inode,le64_to_cpu(pfindData->EndOfFile));
+ i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
/* 512 bytes (2**9) is the fake blocksize that must be
used for this calculation */
@@ -528,7 +525,7 @@ int cifs_get_inode_info(struct inode **pinode,
inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
- /* BB fill in uid and gid here? with help from winbind?
+ /* BB fill in uid and gid here? with help from winbind?
or retrieve from NTFS stream extended attribute */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
/* fill in uid, gid, mode from server ACL */
@@ -540,7 +537,7 @@ int cifs_get_inode_info(struct inode **pinode,
inode->i_gid = cifs_sb->mnt_gid;
/* set so we do not keep refreshing these fields with
bad data after user has changed them in memory */
- atomic_set(&cifsInfo->inUse,1);
+ atomic_set(&cifsInfo->inUse, 1);
}
if (S_ISREG(inode->i_mode)) {
@@ -557,7 +554,7 @@ int cifs_get_inode_info(struct inode **pinode,
else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
- if (pTcon->ses->server->maxBuf <
+ if (pTcon->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
@@ -586,10 +583,11 @@ void cifs_read_inode(struct inode *inode)
cifs_sb = CIFS_SB(inode->i_sb);
xid = GetXid();
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
- cifs_get_inode_info_unix(&inode, "", inode->i_sb,xid);
+
+ if (cifs_sb->tcon->unix_ext)
+ cifs_get_inode_info_unix(&inode, "", inode->i_sb, xid);
else
- cifs_get_inode_info(&inode, "", NULL, inode->i_sb,xid);
+ cifs_get_inode_info(&inode, "", NULL, inode->i_sb, xid);
/* can not call macro FreeXid here since in a void func */
_FreeXid(xid);
}
@@ -623,9 +621,21 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
FreeXid(xid);
return -ENOMEM;
}
- rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls,
+
+ if ((pTcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
+ rc = CIFSPOSIXDelFile(xid, pTcon, full_path,
+ SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cFYI(1, ("posix del rc %d", rc));
+ if ((rc == 0) || (rc == -ENOENT))
+ goto psx_del_no_retry;
+ }
+ rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+psx_del_no_retry:
if (!rc) {
if (direntry->d_inode)
drop_nlink(direntry->d_inode);
@@ -638,12 +648,12 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE,
CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
&netfid, &oplock, NULL, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
CIFSSMBRenameOpenFile(xid, pTcon, netfid, NULL,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
CIFSSMBClose(xid, pTcon, netfid);
if (direntry->d_inode)
@@ -659,7 +669,7 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
rc = CIFSSMBSetTimes(xid, pTcon, full_path,
pinfo_buf,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
else
rc = -EOPNOTSUPP;
@@ -670,7 +680,7 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
/* rc = CIFSSMBSetAttrLegacy(xid, pTcon,
full_path,
(__u16)ATTR_NORMAL,
- cifs_sb->local_nls);
+ cifs_sb->local_nls);
For some strange reason it seems that NT4 eats the
old setattr call without actually setting the
attributes so on to the third attempted workaround
@@ -683,9 +693,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
FILE_WRITE_ATTRIBUTES, 0,
&netfid, &oplock, NULL,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
rc = CIFSSMBSetFileTimes(xid, pTcon,
pinfo_buf,
netfid);
@@ -694,10 +704,10 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
}
kfree(pinfo_buf);
}
- if (rc==0) {
- rc = CIFSSMBDelFile(xid, pTcon, full_path,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ if (rc == 0) {
+ rc = CIFSSMBDelFile(xid, pTcon, full_path,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc) {
if (direntry->d_inode)
@@ -711,10 +721,10 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
CREATE_NOT_DIR |
CREATE_DELETE_ON_CLOSE,
&netfid, &oplock, NULL,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
CIFSSMBRenameOpenFile(xid, pTcon,
netfid, NULL,
cifs_sb->local_nls,
@@ -773,8 +783,8 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
tmp_inode->i_mode = le64_to_cpu(pData->Permissions);
/* since we set the inode type below we need to mask off type
- to avoid strange results if bits above were corrupt */
- tmp_inode->i_mode &= ~S_IFMT;
+ to avoid strange results if bits above were corrupt */
+ tmp_inode->i_mode &= ~S_IFMT;
if (type == UNIX_FILE) {
*pobject_type = DT_REG;
tmp_inode->i_mode |= S_IFREG;
@@ -804,11 +814,11 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
/* safest to just call it a file */
*pobject_type = DT_REG;
tmp_inode->i_mode |= S_IFREG;
- cFYI(1,("unknown inode type %d",type));
+ cFYI(1, ("unknown inode type %d", type));
}
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1,("object type: %d", type));
+ cFYI(1, ("object type: %d", type));
#endif
tmp_inode->i_uid = le64_to_cpu(pData->Uid);
tmp_inode->i_gid = le64_to_cpu(pData->Gid);
@@ -816,7 +826,7 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
+ /* can not safely change the file size here if the
client is writing to it due to potential races */
i_size_write(tmp_inode, end_of_file);
@@ -830,27 +840,28 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
cFYI(1, ("File inode"));
tmp_inode->i_op = &cifs_file_inode_ops;
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
else
tmp_inode->i_fop = &cifs_file_direct_ops;
-
- } else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
tmp_inode->i_fop = &cifs_file_nobrl_ops;
else
tmp_inode->i_fop = &cifs_file_ops;
- if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
+ if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
+ (cifs_sb->tcon->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
tmp_inode->i_data.a_ops = &cifs_addr_ops;
- if(isNewInode)
- return; /* No sense invalidating pages for new inode since we
- have not started caching readahead file data yet */
+ if (isNewInode)
+ return; /* No sense invalidating pages for new inode
+ since we have not started caching
+ readahead file data yet */
if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
(local_size == tmp_inode->i_size)) {
@@ -869,10 +880,10 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
tmp_inode->i_op = &cifs_symlink_inode_ops;
/* tmp_inode->i_fop = *//* do not need to set to anything */
} else {
- cFYI(1, ("Special inode"));
+ cFYI(1, ("Special inode"));
init_special_inode(tmp_inode, tmp_inode->i_mode,
tmp_inode->i_rdev);
- }
+ }
}
int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
@@ -896,22 +907,22 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
FreeXid(xid);
return -ENOMEM;
}
-
- if((pTcon->ses->capabilities & CAP_UNIX) &&
- (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+
+ if ((pTcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
u32 oplock = 0;
- FILE_UNIX_BASIC_INFO * pInfo =
+ FILE_UNIX_BASIC_INFO * pInfo =
kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
- if(pInfo == NULL) {
+ if (pInfo == NULL) {
rc = -ENOMEM;
goto mkdir_out;
}
-
+
rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
mode, NULL /* netfid */, pInfo, &oplock,
- full_path, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ full_path, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
cFYI(1, ("posix mkdir returned 0x%x", rc));
@@ -919,8 +930,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
} else {
int obj_type;
if (pInfo->Type == -1) /* no return info - go query */
- goto mkdir_get_info;
-/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need to set uid/gid */
+ goto mkdir_get_info;
+/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if we need
+ to set uid/gid */
inc_nlink(inode);
if (pTcon->nocase)
direntry->d_op = &cifs_ci_dentry_ops;
@@ -937,7 +949,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
newinode->i_ino =
(unsigned long)pInfo->UniqueId;
} /* note ino incremented to unique num in new_inode */
- if(inode->i_sb->s_flags & MS_NOATIME)
+ if (inode->i_sb->s_flags & MS_NOATIME)
newinode->i_flags |= S_NOATIME | S_NOCMTIME;
newinode->i_nlink = 2;
@@ -949,18 +961,18 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
posix_fill_in_inode(direntry->d_inode,
pInfo, &obj_type, 1 /* NewInode */);
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1,("instantiated dentry %p %s to inode %p",
+ cFYI(1, ("instantiated dentry %p %s to inode %p",
direntry, direntry->d_name.name, newinode));
- if(newinode->i_nlink != 2)
- cFYI(1,("unexpected number of links %d",
+ if (newinode->i_nlink != 2)
+ cFYI(1, ("unexpected number of links %d",
newinode->i_nlink));
#endif
}
kfree(pInfo);
goto mkdir_out;
- }
-
+ }
+
/* BB add setting the equivalent of mode via CreateX w/ACLs */
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -968,14 +980,14 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
cFYI(1, ("cifs_mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
-mkdir_get_info:
+mkdir_get_info:
inc_nlink(inode);
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
- inode->i_sb,xid);
+ inode->i_sb, xid);
else
rc = cifs_get_inode_info(&newinode, full_path, NULL,
- inode->i_sb,xid);
+ inode->i_sb, xid);
if (pTcon->nocase)
direntry->d_op = &cifs_ci_dentry_ops;
@@ -983,10 +995,10 @@ mkdir_get_info:
direntry->d_op = &cifs_dentry_ops;
d_instantiate(direntry, newinode);
/* setting nlink not necessary except in cases where we
- * failed to get it from the server or was set bogus */
+ * failed to get it from the server or was set bogus */
if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2))
- direntry->d_inode->i_nlink = 2;
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+ direntry->d_inode->i_nlink = 2;
+ if (pTcon->unix_ext) {
mode &= ~current->fs->umask;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
CIFSSMBUnixSetPerms(xid, pTcon, full_path,
@@ -1002,27 +1014,27 @@ mkdir_get_info:
mode, (__u64)-1,
(__u64)-1, 0 /* dev_t */,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
} else {
/* BB to be implemented via Windows security descriptors
eg CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
-1, -1, local_nls); */
- if(direntry->d_inode) {
+ if (direntry->d_inode) {
direntry->d_inode->i_mode = mode;
direntry->d_inode->i_mode |= S_IFDIR;
- if(cifs_sb->mnt_cifs_flags &
+ if (cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_SET_UID) {
- direntry->d_inode->i_uid =
+ direntry->d_inode->i_uid =
current->fsuid;
- direntry->d_inode->i_gid =
+ direntry->d_inode->i_gid =
current->fsgid;
}
}
}
}
-mkdir_out:
+mkdir_out:
kfree(full_path);
FreeXid(xid);
return rc;
@@ -1056,7 +1068,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
if (!rc) {
drop_nlink(inode);
spin_lock(&direntry->d_inode->i_lock);
- i_size_write(direntry->d_inode,0);
+ i_size_write(direntry->d_inode, 0);
clear_nlink(direntry->d_inode);
spin_unlock(&direntry->d_inode->i_lock);
}
@@ -1119,9 +1131,9 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
if (info_buf_source != NULL) {
info_buf_target = info_buf_source + 1;
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = CIFSSMBUnixQPathInfo(xid, pTcon, fromName,
- info_buf_source,
+ info_buf_source,
cifs_sb_source->local_nls,
cifs_sb_source->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -1171,12 +1183,12 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
might not be the right access to request */
rc = CIFSSMBOpen(xid, pTcon, fromName, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
- cifs_sb_source->local_nls,
- cifs_sb_source->mnt_cifs_flags &
+ cifs_sb_source->local_nls,
+ cifs_sb_source->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
rc = CIFSSMBRenameOpenFile(xid, pTcon, netfid, toName,
- cifs_sb_source->local_nls,
+ cifs_sb_source->local_nls,
cifs_sb_source->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
CIFSSMBClose(xid, pTcon, netfid);
@@ -1247,9 +1259,9 @@ int cifs_revalidate(struct dentry *direntry)
local_mtime = direntry->d_inode->i_mtime;
local_size = direntry->d_inode->i_size;
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+ if (cifs_sb->tcon->unix_ext) {
rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
- direntry->d_sb,xid);
+ direntry->d_sb, xid);
if (rc) {
cFYI(1, ("error on getting revalidate info %d", rc));
/* if (rc != -ENOENT)
@@ -1258,7 +1270,7 @@ int cifs_revalidate(struct dentry *direntry)
}
} else {
rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
- direntry->d_sb,xid);
+ direntry->d_sb, xid);
if (rc) {
cFYI(1, ("error on getting revalidate info %d", rc));
/* if (rc != -ENOENT)
@@ -1271,7 +1283,7 @@ int cifs_revalidate(struct dentry *direntry)
/* if not oplocked, we invalidate inode pages if mtime or file size
had changed on server */
- if (timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) &&
+ if (timespec_equal(&local_mtime, &direntry->d_inode->i_mtime) &&
(local_size == direntry->d_inode->i_size)) {
cFYI(1, ("cifs_revalidate - inode unchanged"));
} else {
@@ -1298,7 +1310,7 @@ int cifs_revalidate(struct dentry *direntry)
if (invalidate_inode) {
/* shrink_dcache not necessary now that cifs dentry ops
are exported for negative dentries */
-/* if(S_ISDIR(direntry->d_inode->i_mode))
+/* if (S_ISDIR(direntry->d_inode->i_mode))
shrink_dcache_parent(direntry); */
if (S_ISREG(direntry->d_inode->i_mode)) {
if (direntry->d_inode->i_mapping)
@@ -1313,7 +1325,7 @@ int cifs_revalidate(struct dentry *direntry)
}
}
/* mutex_unlock(&direntry->d_inode->i_mutex); */
-
+
kfree(full_path);
FreeXid(xid);
return rc;
@@ -1335,23 +1347,19 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE - 1);
struct page *page;
- char *kaddr;
int rc = 0;
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
unlock_page(page);
page_cache_release(page);
return rc;
}
-static int cifs_vmtruncate(struct inode * inode, loff_t offset)
+static int cifs_vmtruncate(struct inode *inode, loff_t offset)
{
struct address_space *mapping = inode->i_mapping;
unsigned long limit;
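The cifs_truncate_page() change above is the one functional change in this run of cleanup: the open-coded map/zero/flush/unmap sequence is collapsed into a single zero_user_page() call. For reference, a sketch of what the removed lines were doing (kernel context assumed; this mirrors the deleted code rather than introducing anything new):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Equivalent of zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
 * KM_USER0) as used above: zero the tail of one page-cache page. */
static void zero_page_tail(struct page *page, unsigned int offset)
{
	char *kaddr = kmap_atomic(page, KM_USER0);	/* map the page */

	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);	/* keep the data cache coherent */
	kunmap_atomic(kaddr, KM_USER0);	/* unmap */
}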
@@ -1424,13 +1432,13 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
/* check if we have permission to change attrs */
rc = inode_change_ok(direntry->d_inode, attrs);
- if(rc < 0) {
+ if (rc < 0) {
FreeXid(xid);
return rc;
} else
rc = 0;
}
-
+
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
FreeXid(xid);
@@ -1459,16 +1467,16 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size,
nfid, npid, FALSE);
atomic_dec(&open_file->wrtPending);
- cFYI(1,("SetFSize for attrs rc = %d", rc));
- if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+ cFYI(1, ("SetFSize for attrs rc = %d", rc));
+ if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
int bytes_written;
rc = CIFSSMBWrite(xid, pTcon,
nfid, 0, attrs->ia_size,
&bytes_written, NULL, NULL,
1 /* 45 seconds */);
- cFYI(1,("Wrt seteof rc %d", rc));
+ cFYI(1, ("Wrt seteof rc %d", rc));
}
- } else
+ } else
rc = -EINVAL;
if (rc != 0) {
@@ -1478,11 +1486,11 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
it by handle */
rc = CIFSSMBSetEOF(xid, pTcon, full_path,
attrs->ia_size, FALSE,
- cifs_sb->local_nls,
+ cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
- if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+ if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
__u16 netfid;
int oplock = FALSE;
@@ -1493,14 +1501,14 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
int bytes_written;
rc = CIFSSMBWrite(xid, pTcon,
netfid, 0,
attrs->ia_size,
&bytes_written, NULL,
NULL, 1 /* 45 sec */);
- cFYI(1,("wrt seteof rc %d",rc));
+ cFYI(1, ("wrt seteof rc %d", rc));
CIFSSMBClose(xid, pTcon, netfid);
}
@@ -1517,7 +1525,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size);
cifs_truncate_page(direntry->d_inode->i_mapping,
direntry->d_inode->i_size);
- } else
+ } else
goto cifs_setattr_exit;
}
if (attrs->ia_valid & ATTR_UID) {
@@ -1535,11 +1543,11 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
mode = attrs->ia_mode;
}
- if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+ if ((pTcon->unix_ext)
&& (attrs->ia_valid & (ATTR_MODE | ATTR_GID | ATTR_UID)))
rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode, uid, gid,
0 /* dev_t */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
else if (attrs->ia_valid & ATTR_MODE) {
rc = 0;
@@ -1559,7 +1567,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs &
(~ATTR_READONLY));
/* Windows ignores set to zero */
- if(time_buf.Attributes == 0)
+ if (time_buf.Attributes == 0)
time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL);
}
/* BB to be implemented -
@@ -1585,7 +1593,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
stamps are changed explicitly (i.e. by utime()
since we would then have a mix of client and
server times */
-
+
if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
set_time = TRUE;
/* Although Samba throws this field away
@@ -1624,7 +1632,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc==0) {
+ if (rc == 0) {
rc = CIFSSMBSetFileTimes(xid, pTcon, &time_buf,
netfid);
CIFSSMBClose(xid, pTcon, netfid);
@@ -1634,7 +1642,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
granularity */
/* rc = CIFSSMBSetTimesLegacy(xid, pTcon, full_path,
- &time_buf, cifs_sb->local_nls); */
+ &time_buf, cifs_sb->local_nls); */
}
}
/* Even if error on time set, no sense failing the call if
@@ -1642,7 +1650,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
and this check ensures that we are not being called from
sys_utimes in which case we ought to fail the call back to
the user when the server rejects the call */
- if((rc) && (attrs->ia_valid &
+ if ((rc) && (attrs->ia_valid &
(ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
rc = 0;
}
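A theme running through this file (and through link.c further down) is replacing per-call-site tests of pTcon->ses->capabilities & CAP_UNIX with a single per-tree-connection unix_ext flag. A hedged sketch of the intent; the helper names and the nounix argument are hypothetical, only the fields named in the diff are real:

/* Illustration only, not part of the patch: derive unix_ext once from the
 * negotiated capabilities (optionally vetoed by a mount option), then let
 * every call site test the one flag. */
static inline void cifs_set_unix_ext(struct cifsTconInfo *tcon, int nounix)
{
	tcon->unix_ext = (tcon->ses->capabilities & CAP_UNIX) && !nounix;
}

static inline int cifs_use_unix_ext(const struct cifsTconInfo *tcon)
{
	return tcon->unix_ext;
}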
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index a414f17..d24fe68 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -3,7 +3,7 @@
*
* vfs operations that deal with io control
*
- * Copyright (C) International Business Machines Corp., 2005
+ * Copyright (C) International Business Machines Corp., 2005,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -30,7 +30,7 @@
#define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2)
-int cifs_ioctl (struct inode * inode, struct file * filep,
+int cifs_ioctl (struct inode *inode, struct file *filep,
unsigned int command, unsigned long arg)
{
int rc = -ENOTTY; /* strange error - but the precedent */
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 6baea85..6a85ef7 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -50,32 +50,33 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
fromName = build_path_from_dentry(old_file);
toName = build_path_from_dentry(direntry);
- if((fromName == NULL) || (toName == NULL)) {
+ if ((fromName == NULL) || (toName == NULL)) {
rc = -ENOMEM;
goto cifs_hl_exit;
}
- if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)
+/* if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)*/
+ if (pTcon->unix_ext)
rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName,
- cifs_sb_target->local_nls,
+ cifs_sb_target->local_nls,
cifs_sb_target->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
else {
rc = CIFSCreateHardLink(xid, pTcon, fromName, toName,
- cifs_sb_target->local_nls,
+ cifs_sb_target->local_nls,
cifs_sb_target->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if((rc == -EIO) || (rc == -EINVAL))
- rc = -EOPNOTSUPP;
+ if ((rc == -EIO) || (rc == -EINVAL))
+ rc = -EOPNOTSUPP;
}
d_drop(direntry); /* force new lookup from server of target */
/* if source file is cached (oplocked) revalidate will not go to server
until the file is closed or oplock broken so update nlinks locally */
- if(old_file->d_inode) {
+ if (old_file->d_inode) {
cifsInode = CIFS_I(old_file->d_inode);
- if(rc == 0) {
+ if (rc == 0) {
old_file->d_inode->i_nlink++;
/* BB should we make this contingent on superblock flag NOATIME? */
/* old_file->d_inode->i_ctime = CURRENT_TIME;*/
@@ -84,14 +85,14 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
to set the parent dir cifs inode time to zero
to force revalidate (faster) for it too? */
}
- /* if not oplocked will force revalidate to get info
+ /* if not oplocked will force revalidate to get info
on source file from srv */
cifsInode->time = 0;
- /* Will update parent dir timestamps from srv within a second.
+ /* Will update parent dir timestamps from srv within a second.
Would it really be worth it to set the parent dir (cifs
inode) time field to zero to force revalidate on parent
- directory faster ie
+ directory faster ie
CIFS_I(inode)->time = 0; */
}
@@ -109,7 +110,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
int rc = -EACCES;
int xid;
char *full_path = NULL;
- char * target_path = ERR_PTR(-ENOMEM);
+ char *target_path = ERR_PTR(-ENOMEM);
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
@@ -129,13 +130,19 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
goto out;
}
-/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+ /* We could change this to:
+ if (pTcon->unix_ext)
+ but there does not seem to be any point in refusing to
+ get symlink info if we can, even if unix extensions are
+ turned off for this mount */
+
if (pTcon->ses->capabilities & CAP_UNIX)
rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
target_path,
PATH_MAX-1,
cifs_sb->local_nls);
else {
+ /* BB add read reparse point symlink code here */
/* rc = CIFSSMBQueryReparseLinkInfo */
/* BB Add code to Query ReparsePoint info */
/* BB Add MAC style xsymlink check here if enabled */
@@ -176,7 +183,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
full_path = build_path_from_dentry(direntry);
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
@@ -185,19 +192,20 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
cFYI(1, ("symname is %s", symname));
/* BB what if DFS and this volume is on different share? BB */
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
cifs_sb->local_nls);
/* else
- rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,cifs_sb_target->local_nls); */
+ rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
+ cifs_sb_target->local_nls); */
if (rc == 0) {
- if (pTcon->ses->capabilities & CAP_UNIX)
+ if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
- inode->i_sb,xid);
+ inode->i_sb, xid);
else
rc = cifs_get_inode_info(&newinode, full_path, NULL,
- inode->i_sb,xid);
+ inode->i_sb, xid);
if (rc != 0) {
cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
@@ -226,9 +234,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
- char *tmp_path = NULL;
- char * tmpbuffer;
- unsigned char * referrals = NULL;
+ char *tmp_path = NULL;
+ char *tmpbuffer;
+ unsigned char *referrals = NULL;
int num_referrals = 0;
int len;
__u16 fid;
@@ -237,13 +245,13 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
cifs_sb = CIFS_SB(inode->i_sb);
pTcon = cifs_sb->tcon;
-/* BB would it be safe against deadlock to grab this sem
+/* BB would it be safe against deadlock to grab this sem
even though rename itself grabs the sem and calls lookup? */
/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/
full_path = build_path_from_dentry(direntry);
/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
@@ -251,70 +259,80 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
cFYI(1,
("Full path: %s inode = 0x%p pBuffer = 0x%p buflen = %d",
full_path, inode, pBuffer, buflen));
- if(buflen > PATH_MAX)
+ if (buflen > PATH_MAX)
len = PATH_MAX;
else
len = buflen;
- tmpbuffer = kmalloc(len,GFP_KERNEL);
- if(tmpbuffer == NULL) {
+ tmpbuffer = kmalloc(len, GFP_KERNEL);
+ if (tmpbuffer == NULL) {
kfree(full_path);
FreeXid(xid);
return -ENOMEM;
}
-/* BB add read reparse point symlink code and Unix extensions symlink code here BB */
+/* BB add read reparse point symlink code and
+ Unix extensions symlink code here BB */
+/* We could disable this based on pTcon->unix_ext flag instead ... but why? */
if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
rc = CIFSSMBUnixQuerySymLink(xid, pTcon, full_path,
tmpbuffer,
len - 1,
cifs_sb->local_nls);
else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
- cERROR(1,("SFU style symlinks not implemented yet"));
+ cERROR(1, ("SFU style symlinks not implemented yet"));
/* add open and read as in fs/cifs/inode.c */
-
} else {
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, GENERIC_READ,
- OPEN_REPARSE_POINT,&fid, &oplock, NULL,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ OPEN_REPARSE_POINT, &fid, &oplock, NULL,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if(!rc) {
+ if (!rc) {
rc = CIFSSMBQueryReparseLinkInfo(xid, pTcon, full_path,
tmpbuffer,
- len - 1,
+ len - 1,
fid,
cifs_sb->local_nls);
- if(CIFSSMBClose(xid, pTcon, fid)) {
- cFYI(1,("Error closing junction point (open for ioctl)"));
+ if (CIFSSMBClose(xid, pTcon, fid)) {
+ cFYI(1, ("Error closing junction point "
+ "(open for ioctl)"));
}
- if(rc == -EIO) {
+ if (rc == -EIO) {
/* Query if DFS Junction */
tmp_path =
kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1,
GFP_KERNEL);
if (tmp_path) {
- strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
- strncat(tmp_path, full_path, MAX_PATHCONF);
- rc = get_dfs_path(xid, pTcon->ses, tmp_path,
+ strncpy(tmp_path, pTcon->treeName,
+ MAX_TREE_SIZE);
+ strncat(tmp_path, full_path,
+ MAX_PATHCONF);
+ rc = get_dfs_path(xid, pTcon->ses,
+ tmp_path,
cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1,("Get DFS for %s rc = %d ",tmp_path, rc));
- if((num_referrals == 0) && (rc == 0))
+ cFYI(1, ("Get DFS for %s rc = %d ",
+ tmp_path, rc));
+ if ((num_referrals == 0) && (rc == 0))
rc = -EACCES;
else {
- cFYI(1,("num referral: %d",num_referrals));
- if(referrals) {
- cFYI(1,("referral string: %s",referrals));
- strncpy(tmpbuffer, referrals, len-1);
+ cFYI(1, ("num referral: %d",
+ num_referrals));
+ if (referrals) {
+ cFYI(1,("referral string: %s", referrals));
+ strncpy(tmpbuffer,
+ referrals,
+ len-1);
}
}
kfree(referrals);
kfree(tmp_path);
}
- /* BB add code like else decode referrals then memcpy to
- tmpbuffer and free referrals string array BB */
+ /* BB add code like else decode referrals
+ then memcpy to tmpbuffer and free referrals
+ string array BB */
}
}
}
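When the reparse-point query above fails with -EIO, cifs_readlink() falls back to asking for DFS referrals, building the lookup name from the tcon's tree name plus the path. A condensed sketch of that step; the helper name is invented and snprintf stands in for the strncpy/strncat pair used in the code:

#include <linux/slab.h>
#include <linux/kernel.h>

static char *build_dfs_lookup_name(const char *tree_name, const char *path)
{
	/* same sizing as above: UNC tree name plus the path component */
	char *tmp = kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1, GFP_KERNEL);

	if (!tmp)
		return NULL;
	snprintf(tmp, MAX_TREE_SIZE + MAX_PATHCONF + 1, "%s%s",
		 tree_name, path);
	return tmp;
}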
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
index 46d62c9..a2415c1 100644
--- a/fs/cifs/md4.c
+++ b/fs/cifs/md4.c
@@ -1,20 +1,20 @@
-/*
+/*
Unix SMB/Netbios implementation.
Version 1.9.
an implementation of MD4 designed for use in the SMB authentication protocol
Copyright (C) Andrew Tridgell 1997-1998.
Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
@@ -170,7 +170,7 @@ mdfour(unsigned char *out, unsigned char *in, int n)
while (n > 64) {
copy64(M, in);
- mdfour64(M,&A,&B, &C, &D);
+ mdfour64(M, &A, &B, &C, &D);
in += 64;
n -= 64;
}
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
index ccebf9b..e5c3e12 100644
--- a/fs/cifs/md5.c
+++ b/fs/cifs/md5.c
@@ -15,9 +15,9 @@
* will fill a supplied 16-byte array with the digest.
*/
-/* This code slightly modified to fit into Samba by
- abartlet@samba.org Jun 2001
- and to fit the cifs vfs by
+/* This code slightly modified to fit into Samba by
+ abartlet@samba.org Jun 2001
+ and to fit the cifs vfs by
Steve French sfrench@us.ibm.com */
#include <linux/string.h>
@@ -106,7 +106,7 @@ MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
}
/*
- * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 19cc294..0bcec08 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/misc.c
*
- * Copyright (C) International Business Machines Corp., 2002,2005
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
@@ -32,12 +32,12 @@
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
-extern struct task_struct * oplockThread;
+extern struct task_struct *oplockThread;
-/* The xid serves as a useful identifier for each incoming vfs request,
- in a similar way to the mid which is useful to track each sent smb,
- and CurrentXid can also provide a running counter (although it
- will eventually wrap past zero) of the total vfs operations handled
+/* The xid serves as a useful identifier for each incoming vfs request,
+ in a similar way to the mid which is useful to track each sent smb,
+ and CurrentXid can also provide a running counter (although it
+ will eventually wrap past zero) of the total vfs operations handled
since the cifs fs was mounted */
unsigned int
@@ -47,10 +47,12 @@ _GetXid(void)
spin_lock(&GlobalMid_Lock);
GlobalTotalActiveXid++;
+
+ /* keep high water mark for number of simultaneous ops in filesystem */
if (GlobalTotalActiveXid > GlobalMaxActiveXid)
- GlobalMaxActiveXid = GlobalTotalActiveXid; /* keep high water mark for number of simultaneous vfs ops in our filesystem */
- if(GlobalTotalActiveXid > 65000)
- cFYI(1,("warning: more than 65000 requests active"));
+ GlobalMaxActiveXid = GlobalTotalActiveXid;
+ if (GlobalTotalActiveXid > 65000)
+ cFYI(1, ("warning: more than 65000 requests active"));
xid = GlobalCurrentXid++;
spin_unlock(&GlobalMid_Lock);
return xid;
@@ -60,7 +62,7 @@ void
_FreeXid(unsigned int xid)
{
spin_lock(&GlobalMid_Lock);
- /* if(GlobalTotalActiveXid == 0)
+ /* if (GlobalTotalActiveXid == 0)
BUG(); */
GlobalTotalActiveXid--;
spin_unlock(&GlobalMid_Lock);
@@ -144,12 +146,12 @@ cifs_buf_get(void)
{
struct smb_hdr *ret_buf = NULL;
-/* We could use negotiated size instead of max_msgsize -
- but it may be more efficient to always alloc same size
- albeit slightly larger than necessary and maxbuffersize
+/* We could use negotiated size instead of max_msgsize -
+ but it may be more efficient to always alloc same size
+ albeit slightly larger than necessary and maxbuffersize
defaults to this and can not be bigger */
- ret_buf =
- (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
+ ret_buf = (struct smb_hdr *) mempool_alloc(cifs_req_poolp,
+ GFP_KERNEL | GFP_NOFS);
/* clear the first few header bytes */
/* for most paths, more is cleared in header_assemble */
@@ -172,7 +174,7 @@ cifs_buf_release(void *buf_to_free)
/* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/
return;
}
- mempool_free(buf_to_free,cifs_req_poolp);
+ mempool_free(buf_to_free, cifs_req_poolp);
atomic_dec(&bufAllocCount);
return;
@@ -183,12 +185,12 @@ cifs_small_buf_get(void)
{
struct smb_hdr *ret_buf = NULL;
-/* We could use negotiated size instead of max_msgsize -
- but it may be more efficient to always alloc same size
- albeit slightly larger than necessary and maxbuffersize
+/* We could use negotiated size instead of max_msgsize -
+ but it may be more efficient to always alloc same size
+ albeit slightly larger than necessary and maxbuffersize
defaults to this and can not be bigger */
- ret_buf =
- (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
+ ret_buf = (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp,
+ GFP_KERNEL | GFP_NOFS);
if (ret_buf) {
/* No need to clear memory here, cleared in header assemble */
/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
@@ -209,30 +211,30 @@ cifs_small_buf_release(void *buf_to_free)
cFYI(1, ("Null buffer passed to cifs_small_buf_release"));
return;
}
- mempool_free(buf_to_free,cifs_sm_req_poolp);
+ mempool_free(buf_to_free, cifs_sm_req_poolp);
atomic_dec(&smBufAllocCount);
return;
}
-/*
+/*
Find a free multiplex id (SMB mid). Otherwise there could be
mid collisions which might cause problems, demultiplexing the
wrong response to this request. Multiplex ids could collide if
one of a series of requests takes much longer than the others, or
if a very large number of long lived requests (byte range
locks or FindNotify requests) are pending. No more than
- 64K-1 requests can be outstanding at one time. If no
+ 64K-1 requests can be outstanding at one time. If no
mids are available, return zero. A future optimization
could make the combination of mids and uid the key we use
- to demultiplex on (rather than mid alone).
+ to demultiplex on (rather than mid alone).
In addition to the above check, the cifs demultiplex
code already used the command code as a secondary
check of the frame and if signing is negotiated the
response would be discarded if the mid were the same
but the signature was wrong. Since the mid is not put in the
pending queue until later (when it is about to be dispatched)
- we do have to limit the number of outstanding requests
+ we do have to limit the number of outstanding requests
to somewhat less than 64K-1 although it is hard to imagine
so many threads being in the vfs at one time.
*/
@@ -240,27 +242,27 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
{
__u16 mid = 0;
__u16 last_mid;
- int collision;
+ int collision;
- if(server == NULL)
+ if (server == NULL)
return mid;
spin_lock(&GlobalMid_Lock);
last_mid = server->CurrentMid; /* we do not want to loop forever */
server->CurrentMid++;
/* This nested loop looks more expensive than it is.
- In practice the list of pending requests is short,
+ In practice the list of pending requests is short,
fewer than 50, and the mids are likely to be unique
on the first pass through the loop unless some request
takes longer than the 64 thousand requests before it
(and it would also have to have been a request that
did not time out) */
- while(server->CurrentMid != last_mid) {
+ while (server->CurrentMid != last_mid) {
struct list_head *tmp;
struct mid_q_entry *mid_entry;
collision = 0;
- if(server->CurrentMid == 0)
+ if (server->CurrentMid == 0)
server->CurrentMid++;
list_for_each(tmp, &server->pending_mid_q) {
@@ -273,7 +275,7 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
break;
}
}
- if(collision == 0) {
+ if (collision == 0) {
mid = server->CurrentMid;
break;
}
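The GetNextMid() hunk above is mostly spacing, but the logic it reflows is worth spelling out: hand out the next multiplex id only after checking that it does not collide with a mid still sitting on the pending queue, and give up (returning 0) if every value is in use. A simplified userspace model with a plain linked list and no locking; all names here are illustrative:

#include <stdint.h>
#include <stddef.h>

struct pending_mid {
	uint16_t mid;
	struct pending_mid *next;
};

static uint16_t next_mid(uint16_t *current_mid,
			 const struct pending_mid *pending)
{
	uint16_t last = *current_mid;	/* remember where we started */
	uint16_t mid = 0;		/* 0 means "none available" */

	(*current_mid)++;
	while (*current_mid != last) {
		const struct pending_mid *p;
		int collision = 0;

		if (*current_mid == 0)	/* 0 is reserved, skip it */
			(*current_mid)++;
		for (p = pending; p != NULL; p = p->next) {
			if (p->mid == *current_mid) {
				collision = 1;
				break;
			}
		}
		if (!collision) {
			mid = *current_mid;
			break;
		}
		(*current_mid)++;	/* collided, try the next value */
	}
	return mid;
}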
@@ -290,11 +292,11 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
const struct cifsTconInfo *treeCon, int word_count
/* length of fixed section (word count) in two byte units */)
{
- struct list_head* temp_item;
- struct cifsSesInfo * ses;
+ struct list_head *temp_item;
+ struct cifsSesInfo *ses;
char *temp = (char *) buffer;
- memset(temp,0,256); /* bigger than MAX_CIFS_HDR_SIZE */
+ memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
buffer->smb_buf_length =
(2 * word_count) + sizeof (struct smb_hdr) -
@@ -325,7 +327,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
buffer->Mid = GetNextMid(treeCon->ses->server);
- if(multiuser_mount != 0) {
+ if (multiuser_mount != 0) {
/* For the multiuser case, there are few obvious technically */
/* possible mechanisms to match the local linux user (uid) */
/* to a valid remote smb user (smb_uid): */
@@ -348,21 +350,22 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* flag were disabled. */
/* BB Add support for establishing new tCon and SMB Session */
- /* with userid/password pairs found on the smb session */
+ /* with userid/password pairs found on the smb session */
/* for other target tcp/ip addresses BB */
- if(current->fsuid != treeCon->ses->linux_uid) {
- cFYI(1,("Multiuser mode and UID did not match tcon uid"));
+ if (current->fsuid != treeCon->ses->linux_uid) {
+ cFYI(1, ("Multiuser mode and UID "
+ "did not match tcon uid"));
read_lock(&GlobalSMBSeslock);
list_for_each(temp_item, &GlobalSMBSessionList) {
ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList);
- if(ses->linux_uid == current->fsuid) {
- if(ses->server == treeCon->ses->server) {
- cFYI(1,("found matching uid substitute right smb_uid"));
+ if (ses->linux_uid == current->fsuid) {
+ if (ses->server == treeCon->ses->server) {
+ cFYI(1, ("found matching uid substitute right smb_uid"));
buffer->Uid = ses->Suid;
break;
} else {
- /* BB eventually call cifs_setup_session here */
- cFYI(1,("local UID found but smb sess with this server does not exist"));
+ /* BB eventually call cifs_setup_session here */
+ cFYI(1, ("local UID found but no smb sess with this server exists"));
}
}
}
@@ -374,8 +377,8 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
buffer->Flags2 |= SMBFLG2_DFS;
if (treeCon->nocase)
buffer->Flags |= SMBFLG_CASELESS;
- if((treeCon->ses) && (treeCon->ses->server))
- if(treeCon->ses->server->secMode &
+ if ((treeCon->ses) && (treeCon->ses->server))
+ if (treeCon->ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
@@ -388,18 +391,18 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
static int
checkSMBhdr(struct smb_hdr *smb, __u16 mid)
{
- /* Make sure that this really is an SMB, that it is a response,
+ /* Make sure that this really is an SMB, that it is a response,
and that the message ids match */
- if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
- (mid == smb->Mid)) {
- if(smb->Flags & SMBFLG_RESPONSE)
- return 0;
- else {
+ if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
+ (mid == smb->Mid)) {
+ if (smb->Flags & SMBFLG_RESPONSE)
+ return 0;
+ else {
/* only one valid case where server sends us request */
- if(smb->Command == SMB_COM_LOCKING_ANDX)
+ if (smb->Command == SMB_COM_LOCKING_ANDX)
return 0;
else
- cERROR(1, ("Rcvd Request not response"));
+ cERROR(1, ("Received Request not response"));
}
} else { /* bad signature or mid */
if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
@@ -426,9 +429,9 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
smb->WordCount = 0;
/* some error cases do not return wct and bcc */
return 0;
- } else if ((length == sizeof(struct smb_hdr) + 1) &&
+ } else if ((length == sizeof(struct smb_hdr) + 1) &&
(smb->WordCount == 0)) {
- char * tmp = (char *)smb;
+ char *tmp = (char *)smb;
/* Need to work around a bug in two servers here */
/* First, check if the part of bcc they sent was zero */
if (tmp[sizeof(struct smb_hdr)] == 0) {
@@ -442,7 +445,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
tmp[sizeof(struct smb_hdr)+1] = 0;
return 0;
}
- cERROR(1,("rcvd invalid byte count (bcc)"));
+ cERROR(1, ("rcvd invalid byte count (bcc)"));
} else {
cERROR(1, ("Length less than smb header size"));
}
@@ -458,32 +461,33 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
return 1;
clc_len = smbCalcSize_LE(smb);
- if(4 + len != length) {
- cERROR(1, ("Length read does not match RFC1001 length %d",len));
+ if (4 + len != length) {
+ cERROR(1, ("Length read does not match RFC1001 length %d",
+ len));
return 1;
}
if (4 + len != clc_len) {
/* check if bcc wrapped around for large read responses */
- if((len > 64 * 1024) && (len > clc_len)) {
+ if ((len > 64 * 1024) && (len > clc_len)) {
/* check if lengths match mod 64K */
- if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
- return 0; /* bcc wrapped */
+ if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
+ return 0; /* bcc wrapped */
}
cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
clc_len, 4 + len, smb->Mid));
/* Windows XP can return a few bytes too much, presumably
- an illegal pad, at the end of byte range lock responses
+ an illegal pad, at the end of byte range lock responses
so we allow for that three byte pad, as long as actual
received length is as long or longer than calculated length */
- /* We have now had to extend this more, since there is a
+ /* We have now had to extend this more, since there is a
case in which it needs to be bigger still to handle a
malformed response to transact2 findfirst from WinXP when
access denied is returned and thus bcc and wct are zero
but server says length is 0x21 bytes too long as if the server
forgot to reset the smb rfc1001 length when it reset the
wct and bcc to minimum size and drop the t2 parms and data */
- if((4+len > clc_len) && (len <= clc_len + 512))
+ if ((4+len > clc_len) && (len <= clc_len + 512))
return 0;
else {
cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
@@ -495,61 +499,64 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
}
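The checkSMB() validation that closes above accepts three shapes of length mismatch: an exact match between the RFC1001 length and the size computed from wct/bcc, a byte count that wrapped modulo 64K on large reads, and a few bytes of illegal padding that some servers append. A userspace sketch of that acceptance test (with the sense inverted relative to checkSMB, which returns an error indicator):

/* received: bytes read off the socket, 4-byte RFC1001 header included;
 * rfc1001_len: the length field from that header; calc_len: the size
 * computed from the SMB's wct/bcc fields.  Returns 1 if acceptable. */
static int smb_length_acceptable(unsigned int received,
				 unsigned int rfc1001_len,
				 unsigned int calc_len)
{
	if (4 + rfc1001_len != received)
		return 0;			/* transport length mismatch */
	if (4 + rfc1001_len == calc_len)
		return 1;			/* exact match */
	if ((rfc1001_len > 64 * 1024) && (rfc1001_len > calc_len) &&
	    (((4 + rfc1001_len) & 0xFFFF) == (calc_len & 0xFFFF)))
		return 1;			/* bcc wrapped on a large read */
	if ((4 + rfc1001_len > calc_len) && (rfc1001_len <= calc_len + 512))
		return 1;			/* tolerate a small illegal pad */
	return 0;
}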
int
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
-{
- struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)buf;
+{
+ struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp;
struct list_head *tmp1;
struct cifsTconInfo *tcon;
struct cifsFileInfo *netfile;
- cFYI(1,("Checking for oplock break or dnotify response"));
- if((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
+ cFYI(1, ("Checking for oplock break or dnotify response"));
+ if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
(pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
- struct smb_com_transaction_change_notify_rsp * pSMBr =
+ struct smb_com_transaction_change_notify_rsp *pSMBr =
(struct smb_com_transaction_change_notify_rsp *)buf;
- struct file_notify_information * pnotify;
+ struct file_notify_information *pnotify;
__u32 data_offset = 0;
- if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+ if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
- cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName,
+ cFYI(1, ("dnotify on %s Action: 0x%x",
+ pnotify->FileName,
pnotify->Action)); /* BB removeme BB */
- /* cifs_dump_mem("Rcvd notify Data: ",buf,
+ /* cifs_dump_mem("Rcvd notify Data: ",buf,
sizeof(struct smb_hdr)+60); */
return TRUE;
}
- if(pSMBr->hdr.Status.CifsError) {
- cFYI(1,("notify err 0x%d",pSMBr->hdr.Status.CifsError));
+ if (pSMBr->hdr.Status.CifsError) {
+ cFYI(1, ("notify err 0x%d",
+ pSMBr->hdr.Status.CifsError));
return TRUE;
}
return FALSE;
- }
- if(pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
+ }
+ if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
return FALSE;
- if(pSMB->hdr.Flags & SMBFLG_RESPONSE) {
+ if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
/* no sense logging error on invalid handle on oplock
break - harmless race between close request and oplock
break response is expected from time to time writing out
large dirty files cached on the client */
- if ((NT_STATUS_INVALID_HANDLE) ==
- le32_to_cpu(pSMB->hdr.Status.CifsError)) {
- cFYI(1,("invalid handle on oplock break"));
+ if ((NT_STATUS_INVALID_HANDLE) ==
+ le32_to_cpu(pSMB->hdr.Status.CifsError)) {
+ cFYI(1, ("invalid handle on oplock break"));
return TRUE;
- } else if (ERRbadfid ==
+ } else if (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
- return TRUE;
+ return TRUE;
} else {
return FALSE; /* on valid oplock brk we get "request" */
}
}
- if(pSMB->hdr.WordCount != 8)
+ if (pSMB->hdr.WordCount != 8)
return FALSE;
- cFYI(1,(" oplock type 0x%d level 0x%d",pSMB->LockType,pSMB->OplockLevel));
- if(!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
- return FALSE;
+ cFYI(1, ("oplock type 0x%d level 0x%d",
+ pSMB->LockType, pSMB->OplockLevel));
+ if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
+ return FALSE;
/* look up tcon based on tid & uid */
read_lock(&GlobalSMBSeslock);
@@ -557,36 +564,38 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
if ((tcon->tid == buf->Tid) && (srv == tcon->ses->server)) {
cifs_stats_inc(&tcon->num_oplock_brks);
- list_for_each(tmp1,&tcon->openFileList){
- netfile = list_entry(tmp1,struct cifsFileInfo,
+ list_for_each(tmp1, &tcon->openFileList) {
+ netfile = list_entry(tmp1, struct cifsFileInfo,
tlist);
- if(pSMB->Fid == netfile->netfid) {
+ if (pSMB->Fid == netfile->netfid) {
struct cifsInodeInfo *pCifsInode;
read_unlock(&GlobalSMBSeslock);
- cFYI(1,("file id match, oplock break"));
- pCifsInode =
+ cFYI(1,
+ ("file id match, oplock break"));
+ pCifsInode =
CIFS_I(netfile->pInode);
pCifsInode->clientCanCacheAll = FALSE;
- if(pSMB->OplockLevel == 0)
+ if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead
= FALSE;
pCifsInode->oplockPending = TRUE;
AllocOplockQEntry(netfile->pInode,
netfile->netfid,
tcon);
- cFYI(1,("about to wake up oplock thd"));
- if(oplockThread)
+ cFYI(1,
+ ("about to wake up oplock thread"));
+ if (oplockThread)
wake_up_process(oplockThread);
return TRUE;
}
}
read_unlock(&GlobalSMBSeslock);
- cFYI(1,("No matching file for oplock break"));
+ cFYI(1, ("No matching file for oplock break"));
return TRUE;
}
}
read_unlock(&GlobalSMBSeslock);
- cFYI(1,("Can not process oplock break for non-existent connection"));
+ cFYI(1, ("Can not process oplock break for non-existent connection"));
return TRUE;
}
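The oplock-break path above boils down to: match the break's file id against the tcon's open files, drop the client-side caching rights, and wake the oplock worker to acknowledge. A simplified model with invented types (the kernel walks tcon->openFileList under GlobalSMBSeslock and queues an entry for oplockThread):

struct open_file {
	unsigned short netfid;
	int can_cache_all;	/* batch/exclusive oplock held */
	int can_cache_read;	/* level II oplock held */
	struct open_file *next;
};

/* Returns 1 if a matching open file was found and downgraded. */
static int handle_oplock_break(struct open_file *files,
			       unsigned short fid, int new_oplock_level)
{
	struct open_file *f;

	for (f = files; f != NULL; f = f->next) {
		if (f->netfid != fid)
			continue;
		f->can_cache_all = 0;		/* write caching is gone */
		if (new_oplock_level == 0)
			f->can_cache_read = 0;	/* not even level II left */
		return 1;			/* caller queues the break ack */
	}
	return 0;
}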
@@ -643,13 +652,13 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
only legal in POSIX-like OS (if they are present in the string). Path
names are little endian 16 bit Unicode on the wire */
int
-cifs_convertUCSpath(char *target, const __le16 * source, int maxlen,
- const struct nls_table * cp)
+cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
+ const struct nls_table *cp)
{
- int i,j,len;
+ int i, j, len;
__u16 src_char;
- for(i = 0, j = 0; i < maxlen; i++) {
+ for (i = 0, j = 0; i < maxlen; i++) {
src_char = le16_to_cpu(source[i]);
switch (src_char) {
case 0:
@@ -678,10 +687,10 @@ cifs_convertUCSpath(char *target, const __le16 * source, int maxlen,
case UNI_LESSTHAN:
target[j] = '<';
break;
- default:
- len = cp->uni2char(src_char, &target[j],
+ default:
+ len = cp->uni2char(src_char, &target[j],
NLS_MAX_CHARSET_SIZE);
- if(len > 0) {
+ if (len > 0) {
j += len;
continue;
} else {
@@ -690,7 +699,7 @@ cifs_convertUCSpath(char *target, const __le16 * source, int maxlen,
}
j++;
/* make sure we do not overrun callers allocated temp buffer */
- if(j >= (2 * NAME_MAX))
+ if (j >= (2 * NAME_MAX))
break;
}
cUCS_out:
@@ -703,18 +712,18 @@ cUCS_out:
only legal in POSIX-like OS (if they are present in the string). Path
names are little endian 16 bit Unicode on the wire */
int
-cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
- const struct nls_table * cp, int mapChars)
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+ const struct nls_table *cp, int mapChars)
{
- int i,j,charlen;
+ int i, j, charlen;
int len_remaining = maxlen;
char src_char;
__u16 temp;
- if(!mapChars)
+ if (!mapChars)
return cifs_strtoUCS(target, source, PATH_MAX, cp);
- for(i = 0, j = 0; i < maxlen; j++) {
+ for (i = 0, j = 0; i < maxlen; j++) {
src_char = source[i];
switch (src_char) {
case 0:
@@ -737,7 +746,7 @@ cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
break;
case '|':
target[j] = cpu_to_le16(UNI_PIPE);
- break;
+ break;
/* BB We can not handle remapping slash until
all the calls to build_path_from_dentry
are modified, as they use slash as separator BB */
@@ -749,7 +758,7 @@ cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
len_remaining, &temp);
/* if no match, use question mark, which
at least in some cases serves as a wild card */
- if(charlen < 1) {
+ if (charlen < 1) {
target[j] = cpu_to_le16(0x003f);
charlen = 1;
} else
@@ -758,7 +767,7 @@ cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
/* character may take more than one byte in the
source string, but will take exactly two
bytes in the target string */
- i+= charlen;
+ i += charlen;
continue;
}
i++; /* move to next char in source string */
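Both conversion helpers above exist to shuttle characters that NT path names reserve (colon, asterisk, question mark, pipe and the angle brackets) into substitute Unicode code points and back; slash is deliberately left alone, as the BB comment notes. A userspace sketch of the outbound direction for plain ASCII; the UNI_* constants live in cifs_unicode.h, and the +0xF000 offset used here is the conventional substitute range, stated as an assumption rather than read out of this patch:

#include <stdint.h>

static uint16_t remap_reserved_char(unsigned char c)
{
	switch (c) {
	case ':': case '*': case '?': case '<': case '>': case '|':
		/* reserved on the wire: shift into the substitute range */
		return (uint16_t)(0xF000 + c);
	default:
		return (uint16_t)c;	/* everything else passes through */
	}
}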
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 53e304d..2bfed3f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -3,23 +3,22 @@
*
* Copyright (c) International Business Machines Corp., 2002
* Author(s): Steve French (sfrench@us.ibm.com)
- *
+ *
* Error mapping routines from Samba libsmb/errormap.c
* Copyright (C) Andrew Tridgell 2001
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
+ * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
@@ -30,9 +29,7 @@
#include <linux/fs.h>
#include <asm/div64.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_CIFS_EXPERIMENTAL
#include <linux/inet.h>
-#endif
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -67,22 +64,22 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
{ERRbadshare, -ETXTBSY},
{ERRlock, -EACCES},
{ERRunsup, -EINVAL},
- {ERRnosuchshare,-ENXIO},
+ {ERRnosuchshare, -ENXIO},
{ERRfilexists, -EEXIST},
{ERRinvparm, -EINVAL},
{ERRdiskfull, -ENOSPC},
{ERRinvname, -ENOENT},
- {ERRinvlevel,-EOPNOTSUPP},
+ {ERRinvlevel, -EOPNOTSUPP},
{ERRdirnotempty, -ENOTEMPTY},
{ERRnotlocked, -ENOLCK},
{ERRcancelviolation, -ENOLCK},
{ERRalreadyexists, -EEXIST},
{ERRmoredata, -EOVERFLOW},
- {ERReasnotsupported,-EOPNOTSUPP},
+ {ERReasnotsupported, -EOPNOTSUPP},
{ErrQuota, -EDQUOT},
{ErrNotALink, -ENOLINK},
- {ERRnetlogonNotStarted,-ENOPROTOOPT},
- {ErrTooManyLinks,-EMLINK},
+ {ERRnetlogonNotStarted, -ENOPROTOOPT},
+ {ErrTooManyLinks, -EMLINK},
{0, 0}
};
@@ -133,85 +130,24 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
/* returns 0 if invalid address */
int
-cifs_inet_pton(int address_family, char *cp,void *dst)
+cifs_inet_pton(int address_family, char *cp, void *dst)
{
-#ifdef CONFIG_CIFS_EXPERIMENTAL
int ret = 0;
/* calculate length by finding first slash or NULL */
- /* BB Should we convert '/' slash to '\' here since it seems already done
- before this */
- if( address_family == AF_INET ){
- ret = in4_pton(cp, -1 /* len */, dst , '\\', NULL);
- } else if( address_family == AF_INET6 ){
+ /* BB Should we convert '/' slash to '\' here since it seems already
+ * done before this */
+ if ( address_family == AF_INET ) {
+ ret = in4_pton(cp, -1 /* len */, dst , '\\', NULL);
+ } else if ( address_family == AF_INET6 ) {
ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
}
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1,("address conversion returned %d for %s", ret, cp));
+ cFYI(1, ("address conversion returned %d for %s", ret, cp));
#endif
if (ret > 0)
ret = 1;
return ret;
-#else
- int value;
- int digit;
- int i;
- char temp;
- char bytes[4];
- char *end = bytes;
- static const int addr_class_max[4] =
- { 0xffffffff, 0xffffff, 0xffff, 0xff };
-
- if(address_family != AF_INET)
- return -EAFNOSUPPORT;
-
- for (i = 0; i < 4; i++) {
- bytes[i] = 0;
- }
-
- temp = *cp;
-
- while (TRUE) {
- if (!isdigit(temp))
- return 0;
-
- value = 0;
- digit = 0;
- for (;;) {
- if (isascii(temp) && isdigit(temp)) {
- value = (value * 10) + temp - '0';
- temp = *++cp;
- digit = 1;
- } else
- break;
- }
-
- if (temp == '.') {
- if ((end > bytes + 2) || (value > 255))
- return 0;
- *end++ = value;
- temp = *++cp;
- } else if (temp == ':') {
- cFYI(1,("IPv6 addresses not supported for CIFS mounts yet"));
- return -1;
- } else
- break;
- }
-
- /* check for last characters */
- if (temp != '\0' && (!isascii(temp) || !isspace(temp)))
- if (temp != '\\') {
- if (temp != '/')
- return 0;
- else
- (*cp = '\\'); /* switch the slash the expected way */
- }
- if (value > addr_class_max[end - bytes])
- return 0;
-
- *((__be32 *)dst) = *((__be32 *) bytes) | htonl(value);
- return 1; /* success */
-#endif /* EXPERIMENTAL */
}
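The rewritten cifs_inet_pton() above drops the hand-rolled dotted-quad parser and relies on the generic in4_pton()/in6_pton() helpers from <linux/inet.h>, passing a backslash as the terminating delimiter because the address is parsed out of a UNC name. A sketch restating the converted function under an invented name (kernel context; nothing here goes beyond what the new lines already do):

#include <linux/inet.h>
#include <linux/socket.h>	/* AF_INET, AF_INET6 */

static int addr_pton(int family, const char *cp, void *dst)
{
	int ret = 0;

	if (family == AF_INET)
		ret = in4_pton(cp, -1 /* use strlen */, dst, '\\', NULL);
	else if (family == AF_INET6)
		ret = in6_pton(cp, -1 /* use strlen */, dst, '\\', NULL);

	return ret > 0 ? 1 : ret;	/* 1 on success, 0 if unparsable */
}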
/*****************************************************************************
@@ -246,7 +182,7 @@ static const struct {
ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
/* { This NT error code was 'squashed'
- from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
+ from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
during the session setup } */
{
ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
@@ -261,7 +197,7 @@ static const struct {
ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
/* { This NT error code was 'squashed'
- from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
+ from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
during the session setup } */
{
ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
@@ -331,7 +267,7 @@ static const struct {
ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
/* { This NT error code was 'squashed'
- from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
+ from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
during the session setup } */
{
ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, {
@@ -341,7 +277,7 @@ static const struct {
ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
/* { This NT error code was 'squashed'
- from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
+ from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
during the session setup } */
{
ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
@@ -393,8 +329,8 @@ static const struct {
ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
/* { This NT error code was 'squashed'
- from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES
- during the session setup } */
+ from NT_STATUS_INSUFFICIENT_RESOURCES to
+ NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */
{
ERRDOS, ERRnomem, NT_STATUS_INSUFFICIENT_RESOURCES}, {
ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
@@ -638,8 +574,8 @@ static const struct {
ERRDOS, 19, NT_STATUS_TOO_LATE}, {
ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
/* { This NT error code was 'squashed'
- from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
- during the session setup } */
+ from NT_STATUS_NO_TRUST_SAM_ACCOUNT to
+ NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */
{
ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
@@ -658,7 +594,7 @@ static const struct {
ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
/* { This NT error code was 'squashed'
- from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
+ from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
during the session setup } */
{
ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
@@ -789,7 +725,7 @@ cifs_print_status(__u32 status_code)
if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
(status_code & 0xFFFFFF)) {
printk(KERN_NOTICE "Status code returned 0x%08x %s\n",
- status_code,nt_errs[idx].nt_errstr);
+ status_code, nt_errs[idx].nt_errstr);
}
idx++;
}
@@ -821,7 +757,7 @@ int
map_smb_to_linux_error(struct smb_hdr *smb)
{
unsigned int i;
- int rc = -EIO; /* if transport error smb error may not be set */
+ int rc = -EIO; /* if transport error smb error may not be set */
__u8 smberrclass;
__u16 smberrcode;
@@ -832,9 +768,10 @@ map_smb_to_linux_error(struct smb_hdr *smb)
return 0;
if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
- /* translate the newer STATUS codes to old style errors and then to POSIX errors */
+ /* translate the newer STATUS codes to old style SMB errors
+ * and then to POSIX errors */
__u32 err = le32_to_cpu(smb->Status.CifsError);
- if(cifsFYI & CIFS_RC)
+ if (cifsFYI & CIFS_RC)
cifs_print_status(err);
ntstatus_to_dos(err, &smberrclass, &smberrcode);
} else {
@@ -845,38 +782,42 @@ map_smb_to_linux_error(struct smb_hdr *smb)
/* old style errors */
/* DOS class smb error codes - map DOS */
- if (smberrclass == ERRDOS) { /* one byte field no need to byte reverse */
+ if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */
for (i = 0;
i <
sizeof (mapping_table_ERRDOS) /
sizeof (struct smb_to_posix_error); i++) {
if (mapping_table_ERRDOS[i].smb_err == 0)
break;
- else if (mapping_table_ERRDOS[i].smb_err == smberrcode) {
+ else if (mapping_table_ERRDOS[i].smb_err ==
+ smberrcode) {
rc = mapping_table_ERRDOS[i].posix_code;
break;
}
- /* else try the next error mapping one to see if it will match */
+ /* else try next error mapping one to see if match */
}
- } else if (smberrclass == ERRSRV) { /* server class of error codes */
+ } else if (smberrclass == ERRSRV) { /* server class of error codes */
for (i = 0;
i <
sizeof (mapping_table_ERRSRV) /
sizeof (struct smb_to_posix_error); i++) {
if (mapping_table_ERRSRV[i].smb_err == 0)
break;
- else if (mapping_table_ERRSRV[i].smb_err == smberrcode) {
+ else if (mapping_table_ERRSRV[i].smb_err ==
+ smberrcode) {
rc = mapping_table_ERRSRV[i].posix_code;
break;
}
- /* else try the next error mapping one to see if it will match */
+ /* else try next error mapping to see if match */
}
}
/* else ERRHRD class errors or junk - return EIO */
- cFYI(1, (" !!Mapping smb error code %d to POSIX err %d !!", smberrcode,rc));
+ cFYI(1, (" !!Mapping smb error code %d to POSIX err %d !!",
+ smberrcode, rc));
- /* generic corrective action e.g. reconnect SMB session on ERRbaduid could be added */
+ /* generic corrective action e.g. reconnect SMB session on
+ * ERRbaduid could be added */
return rc;
}
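map_smb_to_linux_error() above is a straight table scan: each mapping table is an array of {smb_err, posix_code} pairs terminated by a zero entry, and anything unmatched falls back to -EIO. A userspace sketch of the lookup, mirroring the structure used above:

struct smb_posix_map {
	unsigned short smb_err;
	int posix_code;
};

static int map_smb_err(const struct smb_posix_map *table,
		       unsigned short smb_err, int fallback)
{
	int i;

	for (i = 0; table[i].smb_err != 0; i++) {
		if (table[i].smb_err == smb_err)
			return table[i].posix_code;
	}
	return fallback;	/* e.g. -EIO when nothing matches */
}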
@@ -910,7 +851,7 @@ smbCalcSize_LE(struct smb_hdr *ptr)
struct timespec
cifs_NTtimeToUnix(u64 ntutc)
{
- struct timespec ts;
+ struct timespec ts;
/* BB what about the timezone? BB */
/* Subtract the NTFS time offset, then convert to 1s intervals. */
@@ -918,7 +859,7 @@ cifs_NTtimeToUnix(u64 ntutc)
t = ntutc - NTFS_TIME_OFFSET;
ts.tv_nsec = do_div(t, 10000000) * 100;
- ts.tv_sec = t;
+ ts.tv_sec = t;
return ts;
}
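cifs_NTtimeToUnix() above converts an NT timestamp (a count of 100 ns intervals since 1601-01-01) by subtracting the 1601-to-1970 offset and splitting the remainder into whole seconds plus a nanosecond part. A standalone sketch of the same arithmetic; the 11644473600-second epoch difference is the standard value that NTFS_TIME_OFFSET encodes and is stated here as an assumption:

#include <stdint.h>

#define EPOCH_DIFF_100NS (11644473600ULL * 10000000ULL)	/* 1601 to 1970 */

struct unix_ts {
	uint64_t sec;
	uint32_t nsec;
};

static struct unix_ts nt_time_to_unix(uint64_t ntutc)
{
	uint64_t t = ntutc - EPOCH_DIFF_100NS;	/* rebase onto the Unix epoch */
	struct unix_ts ts;

	ts.nsec = (uint32_t)((t % 10000000ULL) * 100);	/* leftover 100ns units */
	ts.sec = t / 10000000ULL;			/* whole seconds */
	return ts;
}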
@@ -946,20 +887,20 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
SMB_TIME * st = (SMB_TIME *)&time;
SMB_DATE * sd = (SMB_DATE *)&date;
- cFYI(1,("date %d time %d",date, time));
+ cFYI(1, ("date %d time %d", date, time));
sec = 2 * st->TwoSeconds;
min = st->Minutes;
- if((sec > 59) || (min > 59))
- cERROR(1,("illegal time min %d sec %d", min, sec));
+ if ((sec > 59) || (min > 59))
+ cERROR(1, ("illegal time min %d sec %d", min, sec));
sec += (min * 60);
sec += 60 * 60 * st->Hours;
- if(st->Hours > 24)
- cERROR(1,("illegal hours %d",st->Hours));
+ if (st->Hours > 24)
+ cERROR(1, ("illegal hours %d", st->Hours));
days = sd->Day;
month = sd->Month;
- if((days > 31) || (month > 12))
- cERROR(1,("illegal date, month %d day: %d", month, days));
+ if ((days > 31) || (month > 12))
+ cERROR(1, ("illegal date, month %d day: %d", month, days));
month -= 1;
days += total_days_of_prev_months[month];
days += 3652; /* account for difference in days between 1980 and 1970 */
@@ -970,15 +911,15 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
for years/100 except for years/400, but since the maximum number for DOS
year is 2**7, the last year is 1980+127, which means we need only
consider 2 special case years, ie the years 2000 and 2100, and only
- adjust for the lack of leap year for the year 2100, as 2000 was a
+ adjust for the lack of leap year for the year 2100, as 2000 was a
leap year (divisible by 400) */
- if(year >= 120) /* the year 2100 */
+ if (year >= 120) /* the year 2100 */
days = days - 1; /* do not count leap year for the year 2100 */
/* adjust for leap year where we are still before leap day */
- if(year != 120)
+ if (year != 120)
days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0);
- sec += 24 * 60 * 60 * days;
+ sec += 24 * 60 * 60 * days;
ts.tv_sec = sec;
@@ -986,4 +927,4 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
ts.tv_nsec = 0;
return ts;
-}
+}
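cnvrtDosUnixTm() above unpacks the legacy DOS 16-bit date/time pair through the SMB_TIME and SMB_DATE bitfields before summing seconds since 1970 (with the leap-year fixups discussed in the comments). The bit layout those bitfields express is the standard FAT packing; the sketch below makes it explicit, under the assumption of 5/6/5 bits for halved-seconds/minutes/hours and 5/4/7 bits for day/month/years-since-1980:

#include <stdint.h>

struct dos_tm {
	unsigned int sec, min, hour;	/* time of day */
	unsigned int day, month;	/* 1-based, as stored */
	unsigned int year;		/* years since 1980 */
};

static struct dos_tm decode_dos_datetime(uint16_t date, uint16_t time)
{
	struct dos_tm t;

	t.sec   = (time & 0x1f) * 2;	/* stored in two-second units */
	t.min   = (time >> 5) & 0x3f;
	t.hour  = (time >> 11) & 0x1f;
	t.day   = date & 0x1f;
	t.month = (date >> 5) & 0x0f;
	t.year  = (date >> 9) & 0x7f;
	return t;
}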
diff --git a/fs/cifs/nterr.c b/fs/cifs/nterr.c
index 4da50cd..819fd99 100644
--- a/fs/cifs/nterr.c
+++ b/fs/cifs/nterr.c
@@ -1,19 +1,19 @@
-/*
+/*
* Unix SMB/Netbios implementation.
* Version 1.9.
* RPC Pipe client / server routines
* Copyright (C) Luke Kenneth Casson Leighton 1997-2001.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/fs/cifs/nterr.h b/fs/cifs/nterr.h
index d2fb06c..588abbb 100644
--- a/fs/cifs/nterr.h
+++ b/fs/cifs/nterr.h
@@ -1,4 +1,4 @@
-/*
+/*
Unix SMB/Netbios implementation.
Version 1.9.
NT error code constants
@@ -6,17 +6,17 @@
Copyright (C) John H Terpstra 1996-2000
Copyright (C) Luke Kenneth Casson Leighton 1996-2000
Copyright (C) Paul Ashton 1998-2000
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index d39b712..7170a9b 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/ntlmssp.h
*
- * Copyright (c) International Business Machines Corp., 2002,2006
+ * Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define NTLMSSP_SIGNATURE "NTLMSSP"
@@ -27,18 +27,18 @@
#define UnknownMessage cpu_to_le32(8)
/* Negotiate Flags */
-#define NTLMSSP_NEGOTIATE_UNICODE 0x01 // Text strings are in unicode
-#define NTLMSSP_NEGOTIATE_OEM 0x02 // Text strings are in OEM
-#define NTLMSSP_REQUEST_TARGET 0x04 // Server return its auth realm
-#define NTLMSSP_NEGOTIATE_SIGN 0x0010 // Request signature capability
-#define NTLMSSP_NEGOTIATE_SEAL 0x0020 // Request confidentiality
+#define NTLMSSP_NEGOTIATE_UNICODE 0x01 /* Text strings are in unicode */
+#define NTLMSSP_NEGOTIATE_OEM 0x02 /* Text strings are in OEM */
+#define NTLMSSP_REQUEST_TARGET 0x04 /* Server return its auth realm */
+#define NTLMSSP_NEGOTIATE_SIGN 0x0010 /* Request signature capability */
+#define NTLMSSP_NEGOTIATE_SEAL 0x0020 /* Request confidentiality */
#define NTLMSSP_NEGOTIATE_DGRAM 0x0040
-#define NTLMSSP_NEGOTIATE_LM_KEY 0x0080 // Use LM session key for sign/seal
-#define NTLMSSP_NEGOTIATE_NTLM 0x0200 // NTLM authentication
+#define NTLMSSP_NEGOTIATE_LM_KEY 0x0080 /* Sign/seal use LM session key */
+#define NTLMSSP_NEGOTIATE_NTLM 0x0200 /* NTLM authentication */
#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000
#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
-#define NTLMSSP_NEGOTIATE_LOCAL_CALL 0x4000 // client/server on same machine
-#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000 // Sign for all security levels
+#define NTLMSSP_NEGOTIATE_LOCAL_CALL 0x4000 /* client/server on same machine */
+#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000 /* Sign for all security levels */
#define NTLMSSP_TARGET_TYPE_DOMAIN 0x10000
#define NTLMSSP_TARGET_TYPE_SERVER 0x20000
#define NTLMSSP_TARGET_TYPE_SHARE 0x40000
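[Note: the ntlmssp.h hunk above only converts the C99-style // comments on the negotiate-flag constants to the /* */ form preferred by kernel style; the flag values themselves are untouched. As a minimal standalone sketch (not code from this patch; check_negotiate_flags() is a hypothetical helper), flags like these are OR-ed together when a request is built and tested with a bitwise AND when the peer's reply is parsed:

/* Illustrative only: standalone bit-flag handling sketch. */
#include <stdio.h>

#define NTLMSSP_NEGOTIATE_UNICODE     0x01
#define NTLMSSP_NEGOTIATE_SIGN        0x0010
#define NTLMSSP_NEGOTIATE_NTLM        0x0200
#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN 0x8000

static void check_negotiate_flags(unsigned int flags)
{
        /* a bitwise AND tells us whether a capability was granted */
        if (flags & NTLMSSP_NEGOTIATE_UNICODE)
                printf("strings will be sent in Unicode\n");
        if (flags & (NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_ALWAYS_SIGN))
                printf("packet signing was negotiated\n");
}

int main(void)
{
        /* OR the wanted capabilities into one 32-bit field */
        unsigned int flags = NTLMSSP_NEGOTIATE_UNICODE |
                             NTLMSSP_NEGOTIATE_NTLM |
                             NTLMSSP_NEGOTIATE_ALWAYS_SIGN;

        check_negotiate_flags(flags);
        return 0;
}
]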
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index c08bda9..916df94 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -2,7 +2,7 @@
* fs/cifs/readdir.c
*
* Directory search handling
- *
+ *
* Copyright (C) International Business Machines Corp., 2004, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
@@ -34,24 +34,23 @@
#ifdef CONFIG_CIFS_DEBUG2
static void dump_cifs_file_struct(struct file *file, char *label)
{
- struct cifsFileInfo * cf;
+ struct cifsFileInfo *cf;
if (file) {
cf = file->private_data;
if (cf == NULL) {
- cFYI(1,("empty cifs private file data"));
+ cFYI(1, ("empty cifs private file data"));
return;
}
if (cf->invalidHandle) {
- cFYI(1,("invalid handle"));
+ cFYI(1, ("invalid handle"));
}
if (cf->srch_inf.endOfSearch) {
- cFYI(1,("end of search"));
+ cFYI(1, ("end of search"));
}
if (cf->srch_inf.emptyDir) {
- cFYI(1,("empty dir"));
+ cFYI(1, ("empty dir"));
}
-
}
}
#endif /* DEBUG2 */
@@ -73,7 +72,8 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
qstring->hash = full_name_hash(qstring->name, qstring->len);
tmp_dentry = d_lookup(file->f_path.dentry, qstring);
if (tmp_dentry) {
- cFYI(0, ("existing dentry with inode 0x%p", tmp_dentry->d_inode));
+ cFYI(0, ("existing dentry with inode 0x%p",
+ tmp_dentry->d_inode));
*ptmp_inode = tmp_dentry->d_inode;
/* BB overwrite old name? i.e. tmp_dentry->d_name and tmp_dentry->d_name.len??*/
if (*ptmp_inode == NULL) {
@@ -87,7 +87,7 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
} else {
tmp_dentry = d_alloc(file->f_path.dentry, qstring);
if (tmp_dentry == NULL) {
- cERROR(1,("Failed allocating dentry"));
+ cERROR(1, ("Failed allocating dentry"));
*ptmp_inode = NULL;
return rc;
}
@@ -100,7 +100,7 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
if (*ptmp_inode == NULL)
return rc;
if (file->f_path.dentry->d_sb->s_flags & MS_NOATIME)
- (*ptmp_inode)->i_flags |= S_NOATIME | S_NOCMTIME;
+ (*ptmp_inode)->i_flags |= S_NOATIME | S_NOCMTIME;
rc = 2;
}
@@ -109,7 +109,7 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
return rc;
}
-static void AdjustForTZ(struct cifsTconInfo * tcon, struct inode * inode)
+static void AdjustForTZ(struct cifsTconInfo *tcon, struct inode *inode)
{
if ((tcon) && (tcon->ses) && (tcon->ses->server)) {
inode->i_ctime.tv_sec += tcon->ses->server->timeAdj;
@@ -121,7 +121,7 @@ static void AdjustForTZ(struct cifsTconInfo * tcon, struct inode * inode)
static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
- char * buf, int *pobject_type, int isNewInode)
+ char *buf, int *pobject_type, int isNewInode)
{
loff_t local_size;
struct timespec local_mtime;
@@ -150,7 +150,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
} else { /* legacy, OS2 and DOS style */
/* struct timespec ts;*/
- FIND_FILE_STANDARD_INFO * pfindData =
+ FIND_FILE_STANDARD_INFO * pfindData =
(FIND_FILE_STANDARD_INFO *)buf;
tmp_inode->i_mtime = cnvrtDosUnixTm(
@@ -175,7 +175,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
/* treat dos attribute of read-only as read-only mode bit e.g. 555? */
/* 2767 perms - indicate mandatory locking */
- /* BB fill in uid and gid here? with help from winbind?
+ /* BB fill in uid and gid here? with help from winbind?
or retrieve from NTFS stream extended attribute */
if (atomic_read(&cifsInfo->inUse) == 0) {
tmp_inode->i_uid = cifs_sb->mnt_uid;
@@ -196,7 +196,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
}
tmp_inode->i_mode |= S_IFDIR;
- } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
+ } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
(attr & ATTR_SYSTEM)) {
if (end_of_file == 0) {
*pobject_type = DT_FIFO;
@@ -206,13 +206,13 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
inode as needing revalidate and get the real type
(blk vs chr vs. symlink) later ie in lookup */
*pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- cifsInfo->time = 0;
+ tmp_inode->i_mode |= S_IFREG;
+ cifsInfo->time = 0;
}
/* we no longer mark these because we could not follow them */
/* } else if (attr & ATTR_REPARSE) {
- *pobject_type = DT_LNK;
- tmp_inode->i_mode |= S_IFLNK; */
+ *pobject_type = DT_LNK;
+ tmp_inode->i_mode |= S_IFLNK; */
} else {
*pobject_type = DT_REG;
tmp_inode->i_mode |= S_IFREG;
@@ -220,7 +220,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
tmp_inode->i_mode &= ~(S_IWUGO);
else if ((tmp_inode->i_mode & S_IWUGO) == 0)
/* the ATTR_READONLY flag may have been changed on */
- /* server -- set any w bits allowed by mnt_file_mode */
+ /* server -- set any w bits allowed by mnt_file_mode */
tmp_inode->i_mode |= (S_IWUGO & cifs_sb->mnt_file_mode);
} /* could add code here - to validate if device or weird share type? */
@@ -231,7 +231,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
+ /* can not safely change the file size here if the
client is writing to it due to potential races */
i_size_write(tmp_inode, end_of_file);
@@ -254,7 +254,6 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
else
tmp_inode->i_fop = &cifs_file_direct_ops;
-
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
tmp_inode->i_fop = &cifs_file_nobrl_ops;
else
@@ -322,8 +321,8 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
/* since we set the inode type below we need to mask off type
- to avoid strange results if bits above were corrupt */
- tmp_inode->i_mode &= ~S_IFMT;
+ to avoid strange results if bits above were corrupt */
+ tmp_inode->i_mode &= ~S_IFMT;
if (type == UNIX_FILE) {
*pobject_type = DT_REG;
tmp_inode->i_mode |= S_IFREG;
@@ -353,7 +352,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
/* safest to just call it a file */
*pobject_type = DT_REG;
tmp_inode->i_mode |= S_IFREG;
- cFYI(1,("unknown inode type %d",type));
+ cFYI(1, ("unknown inode type %d", type));
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
@@ -368,7 +367,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
+ /* can not safely change the file size here if the
client is writing to it due to potential races */
i_size_write(tmp_inode, end_of_file);
@@ -393,15 +392,16 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
tmp_inode->i_fop = &cifs_file_ops;
if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
+ (cifs_sb->tcon->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
tmp_inode->i_data.a_ops = &cifs_addr_ops;
if (isNewInode)
- return; /* No sense invalidating pages for new inode since we
- have not started caching readahead file data yet */
+ return; /* No sense invalidating pages for new inode
+ since we have not started caching readahead
+ file data for it yet */
if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
(local_size == tmp_inode->i_size)) {
@@ -420,7 +420,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
tmp_inode->i_op = &cifs_symlink_inode_ops;
/* tmp_inode->i_fop = *//* do not need to set to anything */
} else {
- cFYI(1, ("Special inode"));
+ cFYI(1, ("Special inode"));
init_special_inode(tmp_inode, tmp_inode->i_mode,
tmp_inode->i_rdev);
}
@@ -429,14 +429,14 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
static int initiate_cifs_search(const int xid, struct file *file)
{
int rc = 0;
- char * full_path;
- struct cifsFileInfo * cifsFile;
+ char *full_path;
+ struct cifsFileInfo *cifsFile;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
if (file->private_data == NULL) {
- file->private_data =
- kzalloc(sizeof(struct cifsFileInfo),GFP_KERNEL);
+ file->private_data =
+ kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
}
if (file->private_data == NULL)
@@ -463,9 +463,11 @@ static int initiate_cifs_search(const int xid, struct file *file)
ffirst_retry:
/* test for Unix extensions */
- if (pTcon->ses->capabilities & CAP_UNIX) {
+ /* but now check for them on the share/mount not on the SMB session */
+/* if (pTcon->ses->capabilities & CAP_UNIX) { */
+ if (pTcon->unix_ext) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
- } else if ((pTcon->ses->capabilities &
+ } else if ((pTcon->ses->capabilities &
(CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
@@ -474,13 +476,13 @@ ffirst_retry:
cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
}
- rc = CIFSFindFirst(xid, pTcon,full_path,cifs_sb->local_nls,
+ rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
&cifsFile->netfid, &cifsFile->srch_inf,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
if (rc == 0)
cifsFile->invalidHandle = FALSE;
- if ((rc == -EOPNOTSUPP) &&
+ if ((rc == -EOPNOTSUPP) &&
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
goto ffirst_retry;
@@ -495,17 +497,17 @@ static int cifs_unicode_bytelen(char *str)
int len;
__le16 * ustr = (__le16 *)str;
- for(len=0;len <= PATH_MAX;len++) {
+ for (len = 0; len <= PATH_MAX; len++) {
if (ustr[len] == 0)
return len << 1;
}
- cFYI(1,("Unicode string longer than PATH_MAX found"));
+ cFYI(1, ("Unicode string longer than PATH_MAX found"));
return len << 1;
}
static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
{
- char * new_entry;
+ char *new_entry;
FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
if (level == SMB_FIND_FILE_INFO_STANDARD) {
@@ -516,21 +518,21 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
pfData->FileNameLength;
} else
new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
- cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
+ cFYI(1, ("new entry %p old entry %p", new_entry, old_entry));
/* validate that new_entry is not past end of SMB */
if (new_entry >= end_of_smb) {
cERROR(1,
("search entry %p began after end of SMB %p old entry %p",
- new_entry, end_of_smb, old_entry));
+ new_entry, end_of_smb, old_entry));
return NULL;
} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
- (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) ||
- ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+ (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
+ || ((level != SMB_FIND_FILE_INFO_STANDARD) &&
(new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
- cERROR(1,("search entry %p extends after end of SMB %p",
+ cERROR(1, ("search entry %p extends after end of SMB %p",
new_entry, end_of_smb));
return NULL;
- } else
+ } else
return new_entry;
}
@@ -541,8 +543,8 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
{
int rc = 0;
- char * filename = NULL;
- int len = 0;
+ char *filename = NULL;
+ int len = 0;
if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
@@ -554,25 +556,25 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
len = strnlen(filename, 5);
}
} else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
- FILE_DIRECTORY_INFO * pFindData =
+ FILE_DIRECTORY_INFO * pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if (cfile->srch_inf.info_level ==
+ } else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
- FILE_FULL_DIRECTORY_INFO * pFindData =
+ FILE_FULL_DIRECTORY_INFO * pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_ID_FULL_DIR_INFO) {
- SEARCH_ID_FULL_DIR_INFO * pFindData =
+ SEARCH_ID_FULL_DIR_INFO * pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if (cfile->srch_inf.info_level ==
+ } else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
- FILE_BOTH_DIRECTORY_INFO * pFindData =
+ FILE_BOTH_DIRECTORY_INFO * pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
@@ -582,7 +584,8 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
filename = &pFindData->FileName[0];
len = pFindData->FileNameLength;
} else {
- cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
+ cFYI(1, ("Unknown findfirst level %d",
+ cfile->srch_inf.info_level));
}
if (filename) {
@@ -595,15 +598,15 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
} else if (len == 4) {
/* check for .. */
if ((ufilename[0] == UNICODE_DOT)
- &&(ufilename[1] == UNICODE_DOT))
+ && (ufilename[1] == UNICODE_DOT))
rc = 2;
}
} else /* ASCII */ {
if (len == 1) {
- if (filename[0] == '.')
+ if (filename[0] == '.')
rc = 1;
} else if (len == 2) {
- if((filename[0] == '.') && (filename[1] == '.'))
+ if ((filename[0] == '.') && (filename[1] == '.'))
rc = 2;
}
}
@@ -614,7 +617,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
/* Check if directory that we are searching has changed so we can decide
whether we can use the cached search results from the previous search */
-static int is_dir_changed(struct file * file)
+static int is_dir_changed(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
@@ -633,22 +636,22 @@ static int is_dir_changed(struct file * file)
/* We start counting in the buffer with entry 2 and increment for every
entry (do not increment for . or .. entry) */
static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
- struct file *file, char **ppCurrentEntry, int *num_to_ret)
+ struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
int rc = 0;
int pos_in_buf = 0;
loff_t first_entry_in_buffer;
loff_t index_to_find = file->f_pos;
- struct cifsFileInfo * cifsFile = file->private_data;
+ struct cifsFileInfo *cifsFile = file->private_data;
/* check if index in the buffer */
-
- if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
+
+ if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
(num_to_ret == NULL))
return -ENOENT;
-
+
*ppCurrentEntry = NULL;
- first_entry_in_buffer =
- cifsFile->srch_inf.index_of_last_entry -
+ first_entry_in_buffer =
+ cifsFile->srch_inf.index_of_last_entry -
cifsFile->srch_inf.entries_in_buffer;
/* if first entry in buf is zero then is first buffer
@@ -660,17 +663,17 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
#ifdef CONFIG_CIFS_DEBUG2
dump_cifs_file_struct(file, "In fce ");
#endif
- if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
- is_dir_changed(file)) ||
+ if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
+ is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
- cFYI(1,("search backing up - close and restart search"));
+ cFYI(1, ("search backing up - close and restart search"));
cifsFile->invalidHandle = TRUE;
CIFSFindClose(xid, pTcon, cifsFile->netfid);
kfree(cifsFile->search_resume_name);
cifsFile->search_resume_name = NULL;
if (cifsFile->srch_inf.ntwrk_buf_start) {
- cFYI(1,("freeing SMB ff cache buf on search rewind"));
+ cFYI(1, ("freeing SMB ff cache buf on search rewind"));
if (cifsFile->srch_inf.smallBuf)
cifs_small_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
@@ -678,17 +681,18 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
cifs_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
}
- rc = initiate_cifs_search(xid,file);
+ rc = initiate_cifs_search(xid, file);
if (rc) {
- cFYI(1,("error %d reinitiating a search on rewind",rc));
+ cFYI(1, ("error %d reinitiating a search on rewind",
+ rc));
return rc;
}
}
- while((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
- (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)){
- cFYI(1,("calling findnext2"));
- rc = CIFSFindNext(xid,pTcon,cifsFile->netfid,
+ while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
+ (rc == 0) && (cifsFile->srch_inf.endOfSearch == FALSE)) {
+ cFYI(1, ("calling findnext2"));
+ rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
if (rc)
return -ENOENT;
@@ -697,8 +701,8 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
/* we found the buffer that contains the entry */
/* scan and find it */
int i;
- char * current_entry;
- char * end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
+ char *current_entry;
+ char *end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
smbCalcSize((struct smb_hdr *)
cifsFile->srch_inf.ntwrk_buf_start);
@@ -706,28 +710,28 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
- cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+ cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
- for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
+ for (i=0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
/* go entry by entry figuring out which is first */
- current_entry = nxt_dir_entry(current_entry,end_of_smb,
+ current_entry = nxt_dir_entry(current_entry, end_of_smb,
cifsFile->srch_inf.info_level);
}
- if((current_entry == NULL) && (i < pos_in_buf)) {
+ if ((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
- cERROR(1,("reached end of buf searching for pos in buf"
+ cERROR(1, ("reached end of buf searching for pos in buf"
" %d index to find %lld rc %d",
- pos_in_buf,index_to_find,rc));
+ pos_in_buf, index_to_find, rc));
}
rc = 0;
*ppCurrentEntry = current_entry;
} else {
- cFYI(1,("index not in buffer - could not findnext into it"));
+ cFYI(1, ("index not in buffer - could not findnext into it"));
return 0;
}
- if(pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
- cFYI(1,("can not return entries pos_in_buf beyond last entry"));
+ if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) {
+ cFYI(1, ("can not return entries pos_in_buf beyond last"));
*num_to_ret = 0;
} else
*num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf;
@@ -738,81 +742,81 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
/* inode num, inode type and filename returned */
static int cifs_get_name_from_search_buf(struct qstr *pqst,
char *current_entry, __u16 level, unsigned int unicode,
- struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum)
+ struct cifs_sb_info *cifs_sb, int max_len, ino_t *pinum)
{
int rc = 0;
unsigned int len = 0;
- char * filename;
- struct nls_table * nlt = cifs_sb->local_nls;
+ char *filename;
+ struct nls_table *nlt = cifs_sb->local_nls;
*pinum = 0;
- if(level == SMB_FIND_FILE_UNIX) {
- FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+ if (level == SMB_FIND_FILE_UNIX) {
+ FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
- if(unicode) {
+ if (unicode) {
len = cifs_unicode_bytelen(filename);
} else {
/* BB should we make this strnlen of PATH_MAX? */
len = strnlen(filename, PATH_MAX);
}
- /* BB fixme - hash low and high 32 bits if not 64 bit arch BB fixme */
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ /* BB fixme - hash low and high 32 bits if not 64 bit arch BB */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
*pinum = pFindData->UniqueId;
- } else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
- FILE_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
+ FILE_DIRECTORY_INFO *pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
- FILE_FULL_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+ FILE_FULL_DIRECTORY_INFO *pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
- SEARCH_ID_FULL_DIR_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+ SEARCH_ID_FULL_DIR_INFO *pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
*pinum = pFindData->UniqueId;
- } else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
- FILE_BOTH_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+ FILE_BOTH_DIRECTORY_INFO *pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ } else if (level == SMB_FIND_FILE_INFO_STANDARD) {
FIND_FILE_STANDARD_INFO * pFindData =
(FIND_FILE_STANDARD_INFO *)current_entry;
filename = &pFindData->FileName[0];
/* one byte length, no name conversion */
len = (unsigned int)pFindData->FileNameLength;
} else {
- cFYI(1,("Unknown findfirst level %d",level));
+ cFYI(1, ("Unknown findfirst level %d", level));
return -EINVAL;
}
- if(len > max_len) {
- cERROR(1,("bad search response length %d past smb end", len));
+ if (len > max_len) {
+ cERROR(1, ("bad search response length %d past smb end", len));
return -EINVAL;
}
- if(unicode) {
+ if (unicode) {
/* BB fixme - test with long names */
/* Note converted filename can be longer than in unicode */
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
pqst->len = cifs_convertUCSpath((char *)pqst->name,
(__le16 *)filename, len/2, nlt);
else
pqst->len = cifs_strfromUCS_le((char *)pqst->name,
- (__le16 *)filename,len/2,nlt);
+ (__le16 *)filename, len/2, nlt);
} else {
pqst->name = filename;
pqst->len = len;
}
- pqst->hash = full_name_hash(pqst->name,pqst->len);
-/* cFYI(1,("filldir on %s",pqst->name)); */
+ pqst->hash = full_name_hash(pqst->name, pqst->len);
+/* cFYI(1, ("filldir on %s",pqst->name)); */
return rc;
}
@@ -821,49 +825,50 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
{
int rc = 0;
struct qstr qstring;
- struct cifsFileInfo * pCifsF;
+ struct cifsFileInfo *pCifsF;
unsigned obj_type;
ino_t inum;
- struct cifs_sb_info * cifs_sb;
+ struct cifs_sb_info *cifs_sb;
struct inode *tmp_inode;
struct dentry *tmp_dentry;
/* get filename and len into qstring */
/* get dentry */
/* decide whether to create and populate ionde */
- if((direntry == NULL) || (file == NULL))
+ if ((direntry == NULL) || (file == NULL))
return -EINVAL;
pCifsF = file->private_data;
-
- if((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
+
+ if ((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
return -ENOENT;
- rc = cifs_entry_is_dot(pfindEntry,pCifsF);
+ rc = cifs_entry_is_dot(pfindEntry, pCifsF);
/* skip . and .. since we added them first */
- if(rc != 0)
+ if (rc != 0)
return 0;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
qstring.name = scratch_buf;
- rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
+ rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
pCifsF->srch_inf.info_level,
- pCifsF->srch_inf.unicode,cifs_sb,
+ pCifsF->srch_inf.unicode, cifs_sb,
max_len,
&inum /* returned */);
- if(rc)
+ if (rc)
return rc;
- rc = construct_dentry(&qstring,file,&tmp_inode, &tmp_dentry);
- if((tmp_inode == NULL) || (tmp_dentry == NULL))
+ rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry);
+ if ((tmp_inode == NULL) || (tmp_dentry == NULL))
return -ENOMEM;
- if(rc) {
+ if (rc) {
/* inode created, we need to hash it with right inode number */
- if(inum != 0) {
- /* BB fixme - hash the 2 32 quantities bits together if necessary BB */
+ if (inum != 0) {
+ /* BB fixme - hash the 2 32 quantities bits together if
+ * necessary BB */
tmp_inode->i_ino = inum;
}
insert_inode_hash(tmp_inode);
@@ -872,27 +877,27 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
/* we pass in rc below, indicating whether it is a new inode,
so we can figure out whether to invalidate the inode cached
data if the file has changed */
- if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
+ if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
unix_fill_in_inode(tmp_inode,
(FILE_UNIX_INFO *)pfindEntry,
&obj_type, rc);
- else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
+ else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
pfindEntry, &obj_type, rc);
else
fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
- if(rc) /* new inode - needs to be tied to dentry */ {
+ if (rc) /* new inode - needs to be tied to dentry */ {
d_instantiate(tmp_dentry, tmp_inode);
- if(rc == 2)
+ if (rc == 2)
d_rehash(tmp_dentry);
}
-
-
- rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
- tmp_inode->i_ino,obj_type);
- if(rc) {
- cFYI(1,("filldir rc = %d",rc));
+
+
+ rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
+ tmp_inode->i_ino, obj_type);
+ if (rc) {
+ cFYI(1, ("filldir rc = %d", rc));
/* we can not return filldir errors to the caller
since they are "normal" when the stat blocksize
is too small - we return remapped error instead */
@@ -909,57 +914,57 @@ static int cifs_save_resume_key(const char *current_entry,
int rc = 0;
unsigned int len = 0;
__u16 level;
- char * filename;
+ char *filename;
- if((cifsFile == NULL) || (current_entry == NULL))
+ if ((cifsFile == NULL) || (current_entry == NULL))
return -EINVAL;
level = cifsFile->srch_inf.info_level;
- if(level == SMB_FIND_FILE_UNIX) {
+ if (level == SMB_FIND_FILE_UNIX) {
FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
- if(cifsFile->srch_inf.unicode) {
+ if (cifsFile->srch_inf.unicode) {
len = cifs_unicode_bytelen(filename);
} else {
/* BB should we make this strnlen of PATH_MAX? */
len = strnlen(filename, PATH_MAX);
}
cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
- } else if(level == SMB_FIND_FILE_DIRECTORY_INFO) {
- FILE_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
+ FILE_DIRECTORY_INFO *pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
- } else if(level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
- FILE_FULL_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
+ FILE_FULL_DIRECTORY_INFO *pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
- } else if(level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
- SEARCH_ID_FULL_DIR_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_ID_FULL_DIR_INFO) {
+ SEARCH_ID_FULL_DIR_INFO *pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
- } else if(level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
- FILE_BOTH_DIRECTORY_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
+ FILE_BOTH_DIRECTORY_INFO *pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
- } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
- FIND_FILE_STANDARD_INFO * pFindData =
+ } else if (level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO *pFindData =
(FIND_FILE_STANDARD_INFO *)current_entry;
filename = &pFindData->FileName[0];
/* one byte length, no name conversion */
len = (unsigned int)pFindData->FileNameLength;
cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
} else {
- cFYI(1,("Unknown findfirst level %d",level));
+ cFYI(1, ("Unknown findfirst level %d", level));
return -EINVAL;
}
cifsFile->srch_inf.resume_name_len = len;
@@ -970,21 +975,21 @@ static int cifs_save_resume_key(const char *current_entry,
int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
{
int rc = 0;
- int xid,i;
+ int xid, i;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
struct cifsFileInfo *cifsFile = NULL;
- char * current_entry;
+ char *current_entry;
int num_to_fill = 0;
- char * tmp_buf = NULL;
- char * end_of_smb;
+ char *tmp_buf = NULL;
+ char *end_of_smb;
int max_len;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
pTcon = cifs_sb->tcon;
- if(pTcon == NULL)
+ if (pTcon == NULL)
return -EINVAL;
switch ((int) file->f_pos) {
@@ -1005,27 +1010,27 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
}
file->f_pos++;
default:
- /* 1) If search is active,
- is in current search buffer?
+ /* 1) If search is active,
+ is in current search buffer?
if it before then restart search
if after then keep searching till find it */
- if(file->private_data == NULL) {
- rc = initiate_cifs_search(xid,file);
- cFYI(1,("initiate cifs search rc %d",rc));
- if(rc) {
+ if (file->private_data == NULL) {
+ rc = initiate_cifs_search(xid, file);
+ cFYI(1, ("initiate cifs search rc %d", rc));
+ if (rc) {
FreeXid(xid);
return rc;
}
}
- if(file->private_data == NULL) {
+ if (file->private_data == NULL) {
rc = -EINVAL;
FreeXid(xid);
return rc;
}
cifsFile = file->private_data;
if (cifsFile->srch_inf.endOfSearch) {
- if(cifsFile->srch_inf.emptyDir) {
+ if (cifsFile->srch_inf.emptyDir) {
cFYI(1, ("End of search, empty dir"));
rc = 0;
break;
@@ -1033,23 +1038,23 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
} /* else {
cifsFile->invalidHandle = TRUE;
CIFSFindClose(xid, pTcon, cifsFile->netfid);
- }
+ }
kfree(cifsFile->search_resume_name);
cifsFile->search_resume_name = NULL; */
- rc = find_cifs_entry(xid,pTcon, file,
- &current_entry,&num_to_fill);
- if(rc) {
- cFYI(1,("fce error %d",rc));
+ rc = find_cifs_entry(xid, pTcon, file,
+ &current_entry, &num_to_fill);
+ if (rc) {
+ cFYI(1, ("fce error %d", rc));
goto rddir2_exit;
} else if (current_entry != NULL) {
- cFYI(1,("entry %lld found",file->f_pos));
+ cFYI(1, ("entry %lld found", file->f_pos));
} else {
- cFYI(1,("could not find entry"));
+ cFYI(1, ("could not find entry"));
goto rddir2_exit;
}
- cFYI(1,("loop through %d times filling dir for net buf %p",
- num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
+ cFYI(1, ("loop through %d times filling dir for net buf %p",
+ num_to_fill, cifsFile->srch_inf.ntwrk_buf_start));
max_len = smbCalcSize((struct smb_hdr *)
cifsFile->srch_inf.ntwrk_buf_start);
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
@@ -1059,8 +1064,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
such multibyte target UTF-8 characters. cifs_unicode.c,
which actually does the conversion, has the same limit */
tmp_buf = kmalloc((2 * NAME_MAX) + 4, GFP_KERNEL);
- for(i=0;(i<num_to_fill) && (rc == 0);i++) {
- if(current_entry == NULL) {
+ for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
+ if (current_entry == NULL) {
/* evaluate whether this case is an error */
cERROR(1,("past end of SMB num to fill %d i %d",
num_to_fill, i));
@@ -1070,20 +1075,20 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
we want to check for that here? */
rc = cifs_filldir(current_entry, file,
filldir, direntry, tmp_buf, max_len);
- if(rc == -EOVERFLOW) {
+ if (rc == -EOVERFLOW) {
rc = 0;
break;
}
file->f_pos++;
- if(file->f_pos ==
+ if (file->f_pos ==
cifsFile->srch_inf.index_of_last_entry) {
- cFYI(1,("last entry in buf at pos %lld %s",
- file->f_pos,tmp_buf));
- cifs_save_resume_key(current_entry,cifsFile);
+ cFYI(1, ("last entry in buf at pos %lld %s",
+ file->f_pos, tmp_buf));
+ cifs_save_resume_key(current_entry, cifsFile);
break;
- } else
- current_entry =
+ } else
+ current_entry =
nxt_dir_entry(current_entry, end_of_smb,
cifsFile->srch_inf.info_level);
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7584646..892be9b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -3,7 +3,7 @@
*
* SMB/CIFS session setup handling routines
*
- * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) International Business Machines Corp., 2006, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -31,7 +31,7 @@
#include <linux/utsname.h>
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
- unsigned char *p24);
+ unsigned char *p24);
static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
{
@@ -45,13 +45,14 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
- /* BB verify whether signing required on neg or just on auth frame
+ /* BB verify whether signing required on neg or just on auth frame
(and NTLM case) */
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ if (ses->server->secMode &
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_UNICODE) {
@@ -74,10 +75,10 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
return capabilities;
}
-static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
- const struct nls_table * nls_cp)
+static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
- char * bcc_ptr = *pbcc_area;
+ char *bcc_ptr = *pbcc_area;
int bytes_ret = 0;
/* BB FIXME add check that strings total less
@@ -89,7 +90,7 @@ static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
bcc_ptr++;
} */
/* copy user */
- if(ses->userName == NULL) {
+ if (ses->userName == NULL) {
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
@@ -100,14 +101,14 @@ static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null termination */
/* copy domain */
- if(ses->domainName == NULL) {
+ if (ses->domainName == NULL) {
/* Sending null domain better than using a bogus domain name (as
we did briefly in 2.6.18) since server will use its default */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
bytes_ret = 0;
} else
- bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
256, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null terminator */
@@ -122,37 +123,37 @@ static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
bcc_ptr += 2; /* trailing null */
bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
- 32, nls_cp);
+ 32, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* trailing null */
*pbcc_area = bcc_ptr;
}
-static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
- const struct nls_table * nls_cp)
+static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
- char * bcc_ptr = *pbcc_area;
+ char *bcc_ptr = *pbcc_area;
/* copy user */
/* BB what about null user mounts - check that we do this BB */
- /* copy user */
- if(ses->userName == NULL) {
- /* BB what about null user mounts - check that we do this BB */
- } else { /* 300 should be long enough for any conceivable user name */
- strncpy(bcc_ptr, ses->userName, 300);
- }
+ /* copy user */
+ if (ses->userName == NULL) {
+ /* BB what about null user mounts - check that we do this BB */
+ } else { /* 300 should be long enough for any conceivable user name */
+ strncpy(bcc_ptr, ses->userName, 300);
+ }
/* BB improve check for overflow */
- bcc_ptr += strnlen(ses->userName, 300);
+ bcc_ptr += strnlen(ses->userName, 300);
*bcc_ptr = 0;
- bcc_ptr++; /* account for null termination */
+ bcc_ptr++; /* account for null termination */
- /* copy domain */
-
- if(ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, 256);
+ /* copy domain */
+
+ if (ses->domainName != NULL) {
+ strncpy(bcc_ptr, ses->domainName, 256);
bcc_ptr += strnlen(ses->domainName, 256);
- } /* else we will send a null domain name
+ } /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
bcc_ptr++;
@@ -167,19 +168,20 @@ static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
- *pbcc_area = bcc_ptr;
+ *pbcc_area = bcc_ptr;
}
-static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
- const struct nls_table * nls_cp)
+static int decode_unicode_ssetup(char **pbcc_area, int bleft,
+ struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
int rc = 0;
int words_left, len;
- char * data = *pbcc_area;
+ char *data = *pbcc_area;
- cFYI(1,("bleft %d",bleft));
+ cFYI(1, ("bleft %d", bleft));
/* SMB header is unaligned, so cifs servers word align start of
@@ -189,7 +191,7 @@ static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInf
their final Unicode string - in which case we
now will not attempt to decode the byte of junk
which follows it */
-
+
words_left = bleft / 2;
/* save off server operating system */
@@ -198,14 +200,14 @@ static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInf
/* We look for obvious messed up bcc or strings in response so we do not go off
the end since (at least) WIN2K and Windows XP have a major bug in not null
terminating last Unicode string in response */
- if(len >= words_left)
+ if (len >= words_left)
return rc;
- if(ses->serverOS)
+ if (ses->serverOS)
kfree(ses->serverOS);
/* UTF-8 string will not grow more than four times as big as UCS-16 */
ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
- if(ses->serverOS != NULL) {
+ if (ses->serverOS != NULL) {
cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len,
nls_cp);
}
@@ -215,67 +217,68 @@ static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInf
/* save off server network operating system */
len = UniStrnlen((wchar_t *) data, words_left);
- if(len >= words_left)
+ if (len >= words_left)
return rc;
- if(ses->serverNOS)
+ if (ses->serverNOS)
kfree(ses->serverNOS);
ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
- if(ses->serverNOS != NULL) {
+ if (ses->serverNOS != NULL) {
cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
nls_cp);
- if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) {
- cFYI(1,("NT4 server"));
+ if (strncmp(ses->serverNOS, "NT LAN Manager 4", 16) == 0) {
+ cFYI(1, ("NT4 server"));
ses->flags |= CIFS_SES_NT4;
}
}
data += 2 * (len + 1);
words_left -= len + 1;
- /* save off server domain */
- len = UniStrnlen((wchar_t *) data, words_left);
-
- if(len > words_left)
- return rc;
-
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
- if(ses->serverDomain != NULL) {
- cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
- nls_cp);
- ses->serverDomain[2*len] = 0;
- ses->serverDomain[(2*len) + 1] = 0;
- }
- data += 2 * (len + 1);
- words_left -= len + 1;
-
- cFYI(1,("words left: %d",words_left));
+ /* save off server domain */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+ if (len > words_left)
+ return rc;
+
+ if (ses->serverDomain)
+ kfree(ses->serverDomain);
+ ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
+ if (ses->serverDomain != NULL) {
+ cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
+ nls_cp);
+ ses->serverDomain[2*len] = 0;
+ ses->serverDomain[(2*len) + 1] = 0;
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ cFYI(1, ("words left: %d", words_left));
return rc;
}
-static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
- const struct nls_table * nls_cp)
+static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+ struct cifsSesInfo *ses,
+ const struct nls_table *nls_cp)
{
int rc = 0;
int len;
- char * bcc_ptr = *pbcc_area;
+ char *bcc_ptr = *pbcc_area;
+
+ cFYI(1, ("decode sessetup ascii. bleft %d", bleft));
- cFYI(1,("decode sessetup ascii. bleft %d", bleft));
-
len = strnlen(bcc_ptr, bleft);
- if(len >= bleft)
+ if (len >= bleft)
return rc;
-
- if(ses->serverOS)
+
+ if (ses->serverOS)
kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
- if(ses->serverOS)
+ if (ses->serverOS)
strncpy(ses->serverOS, bcc_ptr, len);
- if(strncmp(ses->serverOS, "OS/2",4) == 0) {
- cFYI(1,("OS/2 server"));
+ if (strncmp(ses->serverOS, "OS/2", 4) == 0) {
+ cFYI(1, ("OS/2 server"));
ses->flags |= CIFS_SES_OS2;
}
@@ -283,34 +286,34 @@ static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo
bleft -= len + 1;
len = strnlen(bcc_ptr, bleft);
- if(len >= bleft)
+ if (len >= bleft)
return rc;
- if(ses->serverNOS)
+ if (ses->serverNOS)
kfree(ses->serverNOS);
ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
- if(ses->serverNOS)
+ if (ses->serverNOS)
strncpy(ses->serverNOS, bcc_ptr, len);
bcc_ptr += len + 1;
bleft -= len + 1;
- len = strnlen(bcc_ptr, bleft);
- if(len > bleft)
- return rc;
+ len = strnlen(bcc_ptr, bleft);
+ if (len > bleft)
+ return rc;
/* No domain field in LANMAN case. Domain is
returned by old servers in the SMB negprot response */
/* BB For newer servers which do not support Unicode,
but thus do return domain here we could add parsing
for it later, but it is not very important */
- cFYI(1,("ascii: bytes left %d",bleft));
+ cFYI(1, ("ascii: bytes left %d", bleft));
return rc;
}
-int
+int
CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
const struct nls_table *nls_cp)
{
@@ -328,13 +331,13 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
__u16 action;
int bytes_remaining;
- if(ses == NULL)
+ if (ses == NULL)
return -EINVAL;
type = ses->server->secType;
- cFYI(1,("sess setup type %d",type));
- if(type == LANMAN) {
+ cFYI(1, ("sess setup type %d", type));
+ if (type == LANMAN) {
#ifndef CONFIG_CIFS_WEAK_PW_HASH
/* LANMAN and plaintext are less secure and off by default.
So we make this explicitly be turned on in kconfig (in the
@@ -344,15 +347,15 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
return -EOPNOTSUPP;
#endif
wct = 10; /* lanman 2 style sessionsetup */
- } else if((type == NTLM) || (type == NTLMv2)) {
+ } else if ((type == NTLM) || (type == NTLMv2)) {
/* For NTLMv2 failures eventually may need to retry NTLM */
wct = 13; /* old style NTLM sessionsetup */
- } else /* same size for negotiate or auth, NTLMSSP or extended security */
+ } else /* same size: negotiate or auth, NTLMSSP or extended security */
wct = 12;
rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
(void **)&smb_buf);
- if(rc)
+ if (rc)
return rc;
pSMB = (SESSION_SETUP_ANDX *)smb_buf;
@@ -364,27 +367,31 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
second part which will include the strings
and rest of bcc area, in order to avoid having
to do a large buffer 17K allocation */
- iov[0].iov_base = (char *)pSMB;
- iov[0].iov_len = smb_buf->smb_buf_length + 4;
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = smb_buf->smb_buf_length + 4;
/* 2000 big enough to fit max user, domain, NOS name etc. */
str_area = kmalloc(2000, GFP_KERNEL);
+ if (str_area == NULL) {
+ cifs_small_buf_release(smb_buf);
+ return -ENOMEM;
+ }
bcc_ptr = str_area;
ses->flags &= ~CIFS_SES_LANMAN;
- if(type == LANMAN) {
+ if (type == LANMAN) {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
char lnm_session_key[CIFS_SESS_KEY_SIZE];
/* no capabilities flags in old lanman negotiation */
- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
/* BB calculate hash with password */
/* and copy into bcc */
calc_lanman_hash(ses, lnm_session_key);
- ses->flags |= CIFS_SES_LANMAN;
+ ses->flags |= CIFS_SES_LANMAN;
/* #ifdef CONFIG_CIFS_DEBUG2
cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
CIFS_SESS_KEY_SIZE);
@@ -397,10 +404,10 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
changed to do higher than lanman dialect and
we reconnected would we ever calc signing_key? */
- cFYI(1,("Negotiating LANMAN setting up strings"));
+ cFYI(1, ("Negotiating LANMAN setting up strings"));
/* Unicode not allowed for LANMAN dialects */
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
-#endif
+#endif
} else if (type == NTLM) {
char ntlm_session_key[CIFS_SESS_KEY_SIZE];
@@ -409,38 +416,38 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
cpu_to_le16(CIFS_SESS_KEY_SIZE);
pSMB->req_no_secext.CaseSensitivePasswordLength =
cpu_to_le16(CIFS_SESS_KEY_SIZE);
-
+
/* calculate session key */
SMBNTencrypt(ses->password, ses->server->cryptKey,
ntlm_session_key);
- if(first_time) /* should this be moved into common code
+ if (first_time) /* should this be moved into common code
with similar ntlmv2 path? */
- cifs_calculate_mac_key(ses->server->mac_signing_key,
+ cifs_calculate_mac_key(&ses->server->mac_signing_key,
ntlm_session_key, ses->password);
/* copy session key */
- memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
bcc_ptr += CIFS_SESS_KEY_SIZE;
- memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
bcc_ptr += CIFS_SESS_KEY_SIZE;
- if(ses->capabilities & CAP_UNICODE) {
+ if (ses->capabilities & CAP_UNICODE) {
/* unicode strings must be word aligned */
if (iov[0].iov_len % 2) {
*bcc_ptr = 0;
- bcc_ptr++;
- }
+ bcc_ptr++;
+ }
unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
} else
ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
} else if (type == NTLMv2) {
- char * v2_sess_key =
+ char *v2_sess_key =
kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL);
/* BB FIXME change all users of v2_sess_key to
struct ntlmv2_resp */
- if(v2_sess_key == NULL) {
+ if (v2_sess_key == NULL) {
cifs_small_buf_release(smb_buf);
return -ENOMEM;
}
@@ -456,8 +463,8 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
/* calculate session key */
setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- if(first_time) /* should this be moved into common code
- with similar ntlmv2 path? */
+ if (first_time) /* should this be moved into common code
+ with similar ntlmv2 path? */
/* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
response BB FIXME, v2_sess_key); */
@@ -465,11 +472,12 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
/* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
bcc_ptr += LM2_SESS_KEY_SIZE; */
- memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp));
+ memcpy(bcc_ptr, (char *)v2_sess_key,
+ sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
kfree(v2_sess_key);
- if(ses->capabilities & CAP_UNICODE) {
- if(iov[0].iov_len % 2) {
+ if (ses->capabilities & CAP_UNICODE) {
+ if (iov[0].iov_len % 2) {
*bcc_ptr = 0;
} bcc_ptr++;
unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
@@ -488,20 +496,20 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
BCC_LE(smb_buf) = cpu_to_le16(count);
iov[1].iov_base = str_area;
- iov[1].iov_len = count;
+ iov[1].iov_len = count;
rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
/* SMB request buf freed in SendReceive2 */
- cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
- if(rc)
+ cFYI(1, ("ssetup rc from sendrecv2 is %d", rc));
+ if (rc)
goto ssetup_exit;
pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
smb_buf = (struct smb_hdr *)iov[0].iov_base;
- if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
+ if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
rc = -EIO;
- cERROR(1,("bad word count %d", smb_buf->WordCount));
+ cERROR(1, ("bad word count %d", smb_buf->WordCount));
goto ssetup_exit;
}
action = le16_to_cpu(pSMB->resp.Action);
@@ -514,31 +522,32 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
bytes_remaining = BCC(smb_buf);
bcc_ptr = pByteArea(smb_buf);
- if(smb_buf->WordCount == 4) {
+ if (smb_buf->WordCount == 4) {
__u16 blob_len;
blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
bcc_ptr += blob_len;
- if(blob_len > bytes_remaining) {
- cERROR(1,("bad security blob length %d", blob_len));
+ if (blob_len > bytes_remaining) {
+ cERROR(1, ("bad security blob length %d", blob_len));
rc = -EINVAL;
goto ssetup_exit;
}
bytes_remaining -= blob_len;
- }
+ }
/* BB check if Unicode and decode strings */
- if(smb_buf->Flags2 & SMBFLG2_UNICODE)
+ if (smb_buf->Flags2 & SMBFLG2_UNICODE)
rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
ses, nls_cp);
else
- rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp);
-
+ rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining,
+ ses, nls_cp);
+
ssetup_exit:
kfree(str_area);
- if(resp_buf_type == CIFS_SMALL_BUFFER) {
- cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base));
+ if (resp_buf_type == CIFS_SMALL_BUFFER) {
+ cFYI(1, ("ssetup freeing small buf %p", iov[0].iov_base));
cifs_small_buf_release(iov[0].iov_base);
- } else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ } else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
return rc;
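[Note: besides the formatting cleanup, the sess.c hunk above adds a previously missing NULL check after the kmalloc() of str_area and releases the already-allocated SMB request buffer before returning -ENOMEM. A minimal userspace sketch of that allocate/check/unwind pattern, with plain malloc()/free() standing in for the kernel allocators and hypothetical names:

/* Illustrative sketch: allocate two buffers and unwind the first if the
 * second allocation fails, the same shape as the str_area fix. */
#include <stdlib.h>
#include <errno.h>

static int setup_buffers(char **smb_buf, char **str_area)
{
        *smb_buf = malloc(4096);
        if (*smb_buf == NULL)
                return -ENOMEM;

        *str_area = malloc(2000);
        if (*str_area == NULL) {
                /* do not leak the buffer allocated above */
                free(*smb_buf);
                *smb_buf = NULL;
                return -ENOMEM;
        }
        return 0;
}

int main(void)
{
        char *smb_buf, *str_area;

        if (setup_buffers(&smb_buf, &str_area))
                return 1;
        free(str_area);
        free(smb_buf);
        return 0;
}
]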
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 1b1daf6..cfa6d21 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -1,32 +1,32 @@
-/*
+/*
Unix SMB/Netbios implementation.
Version 1.9.
- a partial implementation of DES designed for use in the
+ a partial implementation of DES designed for use in the
SMB authentication protocol
Copyright (C) Andrew Tridgell 1998
Modified by Steve French (sfrench@us.ibm.com) 2002,2004
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* NOTES:
+/* NOTES:
This code makes no attempt to be fast! In fact, it is a very
- slow implementation
+ slow implementation
This code is NOT a complete DES implementation. It implements only
the minimum necessary for SMB authentication, as used by all SMB
@@ -153,7 +153,7 @@ static uchar sbox[8][4][16] = {
};
static void
-permute(char *out, char *in, uchar * p, int n)
+permute(char *out, char *in, uchar *p, int n)
{
int i;
for (i = 0; i < n; i++)
@@ -202,18 +202,18 @@ dohash(char *out, char *in, char *key, int forw)
char *rl;
/* Have to reduce stack usage */
- pk1 = kmalloc(56+56+64+64,GFP_KERNEL);
- if(pk1 == NULL)
+ pk1 = kmalloc(56+56+64+64, GFP_KERNEL);
+ if (pk1 == NULL)
return;
ki = kmalloc(16*48, GFP_KERNEL);
- if(ki == NULL) {
+ if (ki == NULL) {
kfree(pk1);
return;
}
cd = pk1 + 56;
- pd1= cd + 56;
+ pd1 = cd + 56;
rl = pd1 + 64;
permute(pk1, key, perm1, 56);
@@ -247,7 +247,7 @@ dohash(char *out, char *in, char *key, int forw)
char *r2; /* r2[32] */
er = kmalloc(48+48+32+32+32, GFP_KERNEL);
- if(er == NULL) {
+ if (er == NULL) {
kfree(pk1);
kfree(ki);
return;
@@ -327,8 +327,8 @@ smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw)
char *keyb; /* keyb[64] */
unsigned char key2[8];
- outb = kmalloc(64 * 3,GFP_KERNEL);
- if(outb == NULL)
+ outb = kmalloc(64 * 3, GFP_KERNEL);
+ if (outb == NULL)
return;
inb = outb + 64;
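[Note: the smbdes.c hunks only adjust spacing around the existing "reduce stack usage" workaround, where one kmalloc() serves several scratch arrays that are then carved out by pointer arithmetic (pk1, cd, pd1, rl). A hedged standalone sketch of that single-allocation pattern, using userspace malloc()/free() in place of kmalloc()/kfree():

/* Illustrative sketch: one allocation split into several fixed-size
 * scratch regions, as dohash() does with pk1/cd/pd1/rl. */
#include <stdlib.h>
#include <string.h>

static int use_scratch(void)
{
        char *pk1, *cd, *pd1, *rl;

        pk1 = malloc(56 + 56 + 64 + 64);        /* one block for all four */
        if (pk1 == NULL)
                return -1;

        cd  = pk1 + 56;         /* each region starts where the last ends */
        pd1 = cd  + 56;
        rl  = pd1 + 64;

        memset(pk1, 0, 56);
        memset(cd,  0, 56);
        memset(pd1, 0, 64);
        memset(rl,  0, 64);

        free(pk1);              /* a single free releases all regions */
        return 0;
}

int main(void)
{
        return use_scratch();
}
]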
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 4b25ba9..90542a3 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -1,4 +1,4 @@
-/*
+/*
Unix SMB/Netbios implementation.
Version 1.9.
SMB parameters and setup
@@ -7,17 +7,17 @@
Modified by Jeremy Allison 1995.
Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003
Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
@@ -57,7 +57,7 @@ void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
/*
This implements the X/Open SMB password encryption
- It takes a password, a 8 byte "crypt key" and puts 24 bytes of
+ It takes a password, a 8 byte "crypt key" and puts 24 bytes of
encrypted password into p24 */
/* Note that password must be uppercased and null terminated */
void
@@ -73,9 +73,9 @@ SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
E_P16(p14, p21);
SMBOWFencrypt(p21, c8, p24);
-
- memset(p14,0,15);
- memset(p21,0,21);
+
+ memset(p14, 0, 15);
+ memset(p21, 0, 21);
}
/* Routines for Windows NT MD4 Hash functions. */
@@ -90,14 +90,14 @@ _my_wcslen(__u16 * str)
/*
* Convert a string into an NT UNICODE string.
- * Note that regardless of processor type
+ * Note that regardless of processor type
* this must be in intel (little-endian)
* format.
*/
static int
_my_mbstowcs(__u16 * dst, const unsigned char *src, int len)
-{ /* not a very good conversion routine - change/fix */
+{ /* BB not a very good conversion routine - change/fix */
int i;
__u16 val;
@@ -112,7 +112,7 @@ _my_mbstowcs(__u16 * dst, const unsigned char *src, int len)
return i;
}
-/*
+/*
* Creates the MD4 Hash of the users password in NT UNICODE.
*/
@@ -123,7 +123,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
__u16 wpwd[129];
/* Password cannot be longer than 128 characters */
- if(passwd) {
+ if (passwd) {
len = strlen((char *) passwd);
if (len > 128) {
len = 128;
@@ -138,7 +138,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
len = _my_wcslen(wpwd) * sizeof (__u16);
mdfour(p16, (unsigned char *) wpwd, len);
- memset(wpwd,0,129 * 2);
+ memset(wpwd, 0, 129 * 2);
}
#if 0 /* currently unused */
@@ -178,17 +178,17 @@ ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
const char *domain_n, unsigned char kr_buf[16],
const struct nls_table *nls_codepage)
{
- wchar_t * user_u;
- wchar_t * dom_u;
+ wchar_t *user_u;
+ wchar_t *dom_u;
int user_l, domain_l;
struct HMACMD5Context ctx;
/* might as well do one alloc to hold both (user_u and dom_u) */
- user_u = kmalloc(2048 * sizeof(wchar_t),GFP_KERNEL);
- if(user_u == NULL)
+ user_u = kmalloc(2048 * sizeof(wchar_t), GFP_KERNEL);
+ if (user_u == NULL)
return;
dom_u = user_u + 1024;
-
+
/* push_ucs2(NULL, user_u, user_n, (user_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2, STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */
@@ -206,7 +206,7 @@ ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
kfree(user_u);
}
-#endif
+#endif
/* Does the des encryption from the NT or LM MD4 hash. */
static void
@@ -256,15 +256,15 @@ SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
#if 0
static void
SMBOWFencrypt_ntv2(const unsigned char kr[16],
- const struct data_blob * srv_chal,
- const struct data_blob * cli_chal, unsigned char resp_buf[16])
+ const struct data_blob *srv_chal,
+ const struct data_blob *cli_chal, unsigned char resp_buf[16])
{
- struct HMACMD5Context ctx;
+ struct HMACMD5Context ctx;
- hmac_md5_init_limK_to_64(kr, 16, &ctx);
- hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
- hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
- hmac_md5_final(resp_buf, &ctx);
+ hmac_md5_init_limK_to_64(kr, 16, &ctx);
+ hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
+ hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
+ hmac_md5_final(resp_buf, &ctx);
}
static void
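/*
 * Illustrative sketch, not kernel code: the SMBencrypt()/E_md4hash() hunks
 * above scrub their temporary password buffers (memset(p14, 0, 15) etc.)
 * after deriving the response.  The standalone C below shows that pattern
 * with a made-up derivation step; derive_stub() and the buffer sizes are
 * assumptions for illustration only, not the real E_P16()/MD4 code.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* stand-in for E_P16()/mdfour(): any one-way derivation of the password */
static void derive_stub(const unsigned char *in, size_t inlen,
			unsigned char out[16])
{
	size_t i;

	memset(out, 0, 16);
	for (i = 0; i < inlen; i++)
		out[i % 16] ^= in[i];
}

static void make_response(const char *passwd, unsigned char resp[16])
{
	unsigned char p14[15];
	unsigned char tmp[16];
	size_t i, len = strlen(passwd);

	/* work on an uppercased, padded copy, as the code above does */
	memset(p14, 0, sizeof(p14));
	for (i = 0; i < len && i < 14; i++)
		p14[i] = (unsigned char)toupper((unsigned char)passwd[i]);

	derive_stub(p14, sizeof(p14), tmp);
	memcpy(resp, tmp, 16);

	/* scrub every intermediate copy of password material before return;
	 * a compiler may elide a plain memset on a dying object, so real
	 * code would prefer explicit_bzero()/memset_s() where available */
	memset(p14, 0, sizeof(p14));
	memset(tmp, 0, sizeof(tmp));
}

int main(void)
{
	unsigned char r[16];

	make_response("secret", r);
	printf("first byte of derived response: %02x\n", r[0]);
	return 0;
}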
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 212c3c2..2ef0be2 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -4,8 +4,8 @@
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
*
- * See Error Codes section of the SNIA CIFS Specification
- * for more information
+ * See Error Codes section of the SNIA CIFS Specification
+ * for more information
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
@@ -19,7 +19,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define SUCCESS 0x00 /* The request was successful. */
@@ -110,7 +110,7 @@
/* Below errors are used internally (do not come over the wire) for passthrough
from STATUS codes to POSIX only */
-#define ErrTooManyLinks 0xFFFE
+#define ErrTooManyLinks 0xFFFE
/* Following error codes may be generated with the ERRSRV error class.*/
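/*
 * Illustrative sketch, not the cifs mapping table: smberr.h above only
 * defines DOS-era error classes and codes; the client translates them to
 * POSIX errno values elsewhere (fs/cifs/netmisc.c).  A minimal standalone
 * lookup of that shape could look like the following; the handful of
 * entries and the -EIO fallback are assumptions for illustration.
 */
#include <errno.h>
#include <stdio.h>

struct smb_to_posix {
	unsigned char  smb_class;  /* e.g. ERRDOS, ERRSRV */
	unsigned short smb_code;   /* e.g. ERRbadfile */
	int            posix_err;
};

static const struct smb_to_posix map[] = {
	{ 0x01 /* ERRDOS */, 2 /* ERRbadfile  */, -ENOENT },
	{ 0x01 /* ERRDOS */, 5 /* ERRnoaccess */, -EACCES },
	{ 0x02 /* ERRSRV */, 4 /* ERRaccess   */, -EACCES },
};

static int map_smb_error(unsigned char cls, unsigned short code)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].smb_class == cls && map[i].smb_code == code)
			return map[i].posix_err;
	return -EIO;  /* unknown errors collapse to a generic I/O failure */
}

int main(void)
{
	printf("ERRDOS/ERRbadfile -> %d (expect -ENOENT)\n",
	       map_smb_error(0x01, 2));
	return 0;
}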
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5f46845..746bc94 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,10 +1,10 @@
/*
* fs/cifs/transport.c
*
- * Copyright (C) International Business Machines Corp., 2002,2005
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org) 2006.
- *
+ *
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
@@ -17,7 +17,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
@@ -32,7 +32,7 @@
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
-
+
extern mempool_t *cifs_mid_poolp;
extern struct kmem_cache *cifs_oplock_cachep;
@@ -49,7 +49,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
cERROR(1, ("Null TCP session in AllocMidQEntry"));
return NULL;
}
-
+
temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
GFP_KERNEL | GFP_NOFS);
if (temp == NULL)
@@ -86,7 +86,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
list_del(&midEntry->qhead);
atomic_dec(&midCount);
spin_unlock(&GlobalMid_Lock);
- if(midEntry->largeBuf)
+ if (midEntry->largeBuf)
cifs_buf_release(midEntry->resp_buf);
else
cifs_small_buf_release(midEntry->resp_buf);
@@ -94,8 +94,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
now = jiffies;
/* commands taking longer than one second are indications that
something is wrong, unless it is quite a slow link or server */
- if((now - midEntry->when_alloc) > HZ) {
- if((cifsFYI & CIFS_TIMER) &&
+ if ((now - midEntry->when_alloc) > HZ) {
+ if ((cifsFYI & CIFS_TIMER) &&
(midEntry->command != SMB_COM_LOCKING_ANDX)) {
printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
midEntry->command, midEntry->mid);
@@ -110,10 +110,10 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
}
struct oplock_q_entry *
-AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
+AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
{
struct oplock_q_entry *temp;
- if ((pinode== NULL) || (tcon == NULL)) {
+ if ((pinode == NULL) || (tcon == NULL)) {
cERROR(1, ("Null parms passed to AllocOplockQEntry"));
return NULL;
}
@@ -133,9 +133,9 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
}
-void DeleteOplockQEntry(struct oplock_q_entry * oplockEntry)
+void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
{
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
/* should we check if list empty first? */
list_del(&oplockEntry->qhead);
spin_unlock(&GlobalMid_Lock);
@@ -152,7 +152,7 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
struct kvec iov;
unsigned len = smb_buf_length + 4;
- if(ssocket == NULL)
+ if (ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
iov.iov_base = smb_buffer;
iov.iov_len = len;
@@ -164,8 +164,8 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
/* smb header is converted in header_assemble. bcc and rest of SMB word
- area, and byte area if necessary, is converted to littleendian in
- cifssmb.c and RFC1001 len is converted to bigendian in smb_send
+ area, and byte area if necessary, is converted to littleendian in
+ cifssmb.c and RFC1001 len is converted to bigendian in smb_send
Flags2 is converted in SendReceive */
smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
@@ -177,9 +177,9 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
/* smaller timeout here than send2 since smaller size */
- /* Although it may not be required, this also is smaller
- oplock break time */
- if(i > 12) {
+ /* Although it may not be required, this also is smaller
+ oplock break time */
+ if (i > 12) {
cERROR(1,
("sends on sock %p stuck for 7 seconds",
ssocket));
@@ -189,7 +189,7 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
msleep(1 << i);
continue;
}
- if (rc < 0)
+ if (rc < 0)
break;
else
i = 0; /* reset i after each successful send */
@@ -199,7 +199,7 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
}
if (rc < 0) {
- cERROR(1,("Error %d sending data on socket to server", rc));
+ cERROR(1, ("Error %d sending data on socket to server", rc));
} else {
rc = 0;
}
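/*
 * Illustrative sketch, not kernel code: the smb_send() hunks above retry a
 * failed sendmsg() on -ENOSPC/-EAGAIN, sleeping 1 << i milliseconds between
 * attempts and giving up after a dozen rounds (several seconds in total).
 * A userspace version of that backoff shape; the send_once callback, the
 * 12-attempt cap and -ETIMEDOUT return are assumptions mirroring, not
 * reproducing, the code above.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

static void sleep_ms(unsigned int ms)
{
	struct timespec ts = { ms / 1000, (long)(ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

/* retries while the callback reports a transient failure; sleeps grow as
 * 2, 4, 8, ... ms so twelve rounds add up to roughly eight seconds */
static int send_with_backoff(int (*send_once)(void *), void *arg)
{
	unsigned int i = 0;
	int rc;

	for (;;) {
		rc = send_once(arg);
		if (rc != -EAGAIN && rc != -ENOSPC)
			return rc;            /* success or a hard error */
		if (++i > 12)
			return -ETIMEDOUT;    /* socket looks stuck: give up */
		sleep_ms(1u << i);
	}
}

static int flaky_send(void *arg)
{
	int *busy_rounds = arg;

	return (*busy_rounds)-- > 0 ? -EAGAIN : 0;  /* succeed eventually */
}

int main(void)
{
	int busy_rounds = 3;

	printf("rc = %d\n", send_with_backoff(flaky_send, &busy_rounds));
	return 0;
}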
@@ -223,8 +223,8 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
unsigned int total_len;
int first_vec = 0;
unsigned int smb_buf_length = smb_buffer->smb_buf_length;
-
- if(ssocket == NULL)
+
+ if (ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
smb_msg.msg_name = sin;
@@ -234,8 +234,8 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
/* smb header is converted in header_assemble. bcc and rest of SMB word
- area, and byte area if necessary, is converted to littleendian in
- cifssmb.c and RFC1001 len is converted to bigendian in smb_send
+ area, and byte area if necessary, is converted to littleendian in
+ cifssmb.c and RFC1001 len is converted to bigendian in smb_send
Flags2 is converted in SendReceive */
@@ -252,7 +252,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
n_vec - first_vec, total_len);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
- if(i >= 14) {
+ if (i >= 14) {
cERROR(1,
("sends on sock %p stuck for 15 seconds",
ssocket));
@@ -262,17 +262,17 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
msleep(1 << i);
continue;
}
- if (rc < 0)
+ if (rc < 0)
break;
if (rc >= total_len) {
WARN_ON(rc > total_len);
break;
}
- if(rc == 0) {
+ if (rc == 0) {
/* should never happen, letting socket clear before
retrying is our only obvious option here */
- cERROR(1,("tcp sent no data"));
+ cERROR(1, ("tcp sent no data"));
msleep(500);
continue;
}
@@ -295,7 +295,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
}
if (rc < 0) {
- cERROR(1,("Error %d sending data on socket to server", rc));
+ cERROR(1, ("Error %d sending data on socket to server", rc));
} else
rc = 0;
@@ -308,13 +308,13 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
{
- if(long_op == -1) {
+ if (long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
- spin_lock(&GlobalMid_Lock);
- while(1) {
- if(atomic_read(&ses->server->inFlight) >=
+ spin_lock(&GlobalMid_Lock);
+ while (1) {
+ if (atomic_read(&ses->server->inFlight) >=
cifs_max_pending){
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
@@ -328,14 +328,14 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
#endif
spin_lock(&GlobalMid_Lock);
} else {
- if(ses->server->tcpStatus == CifsExiting) {
+ if (ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
return -ENOENT;
}
- /* can not count locking commands against total since
- they are allowed to block on server */
-
+ /* can not count locking commands against total
+ as they are allowed to block on server */
+
/* update # of requests on the wire to server */
if (long_op < 3)
atomic_inc(&ses->server->inFlight);
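/*
 * Illustrative sketch, not kernel code: wait_for_free_request() above blocks
 * a caller while the configured number of requests is already on the wire,
 * except for oplock-break traffic which always goes through.  The same
 * throttle in portable C with a mutex and condition variable; MAX_IN_FLIGHT
 * and the "urgent" flag mirror, but do not reproduce, the kernel's
 * inFlight/long_op handling.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 50

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  slot_freed = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void get_slot(int urgent)
{
	pthread_mutex_lock(&lock);
	if (!urgent)   /* oplock-break style traffic is never held up */
		while (in_flight >= MAX_IN_FLIGHT)
			pthread_cond_wait(&slot_freed, &lock);
	in_flight++;
	pthread_mutex_unlock(&lock);
}

static void put_slot(void)
{
	pthread_mutex_lock(&lock);
	in_flight--;
	pthread_cond_signal(&slot_freed);  /* like wake_up(&request_q) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	get_slot(0);
	printf("in flight: %d\n", in_flight);
	put_slot();
	return 0;
}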
@@ -353,11 +353,11 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
if (ses->server->tcpStatus == CifsExiting) {
return -ENOENT;
} else if (ses->server->tcpStatus == CifsNeedReconnect) {
- cFYI(1,("tcp session dead - return to caller to retry"));
+ cFYI(1, ("tcp session dead - return to caller to retry"));
return -EAGAIN;
} else if (ses->status != CifsGood) {
/* check if SMB session is bad because we are setting it up */
- if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
+ if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
return -EAGAIN;
} /* else ok - we are setting up session */
@@ -369,7 +369,7 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
return 0;
}
-static int wait_for_response(struct cifsSesInfo *ses,
+static int wait_for_response(struct cifsSesInfo *ses,
struct mid_q_entry *midQ,
unsigned long timeout,
unsigned long time_to_wait)
@@ -379,8 +379,8 @@ static int wait_for_response(struct cifsSesInfo *ses,
for (;;) {
curr_timeout = timeout + jiffies;
wait_event(ses->server->response_q,
- (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
- time_after(jiffies, curr_timeout) ||
+ (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
+ time_after(jiffies, curr_timeout) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
@@ -398,16 +398,16 @@ static int wait_for_response(struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
/* Calculate time_to_wait past last receive time.
- Although we prefer not to time out if the
+ Although we prefer not to time out if the
server is still responding - we will time
- out if the server takes more than 15 (or 45
+ out if the server takes more than 15 (or 45
or 180) seconds to respond to this request
- and has not responded to any request from
+ and has not responded to any request from
other threads on the client within 10 seconds */
lrt += time_to_wait;
if (time_after(jiffies, lrt)) {
/* No replies for time_to_wait. */
- cERROR(1,("server not responding"));
+ cERROR(1, ("server not responding"));
return -1;
}
} else {
@@ -417,8 +417,8 @@ static int wait_for_response(struct cifsSesInfo *ses,
}
int
-SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
- struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
+SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+ struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
const int long_op)
{
int rc = 0;
@@ -426,21 +426,21 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
unsigned long timeout;
struct mid_q_entry *midQ;
struct smb_hdr *in_buf = iov[0].iov_base;
-
+
*pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(in_buf);
- cERROR(1,("Null session"));
+ cERROR(1, ("Null session"));
return -EIO;
}
- if(ses->server->tcpStatus == CifsExiting) {
+ if (ses->server->tcpStatus == CifsExiting) {
cifs_small_buf_release(in_buf);
return -ENOENT;
}
- /* Ensure that we do not send more than 50 overlapping requests
+ /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
@@ -450,23 +450,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
- /* make sure that we sign in the same order that we send on this socket
+ /* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- down(&ses->server->tcpSem);
+ down(&ses->server->tcpSem);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* Update # of requests on wire to server */
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
+ rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
@@ -482,7 +482,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
- if(rc < 0)
+ if (rc < 0)
goto out;
if (long_op == -1)
@@ -490,18 +490,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == 2) /* writes past end of file can take loong time */
timeout = 180 * HZ;
else if (long_op == 1)
- timeout = 45 * HZ; /* should be greater than
+ timeout = 45 * HZ; /* should be greater than
servers oplock break timeout (about 43 seconds) */
else
timeout = 15 * HZ;
- /* wait for 15 seconds or until woken up due to response arriving or
+ /* wait for 15 seconds or until woken up due to response arriving or
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
but we still give response a chance to complete */
timeout = 2 * HZ;
- }
+ }
/* No user interrupts in wait - wreaks havoc with performance */
wait_for_response(ses, midQ, timeout, 10 * HZ);
@@ -511,10 +511,10 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
- cERROR(1,("No response to cmd %d mid %d",
+ cERROR(1, ("No response to cmd %d mid %d",
midQ->command, midQ->mid));
- if(midQ->midState == MID_REQUEST_SUBMITTED) {
- if(ses->server->tcpStatus == CifsExiting)
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
else {
ses->server->tcpStatus = CifsNeedReconnect;
@@ -523,9 +523,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
}
if (rc != -EHOSTDOWN) {
- if(midQ->midState == MID_RETRY_NEEDED) {
+ if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1,("marking request for retry"));
+ cFYI(1, ("marking request for retry"));
} else {
rc = -EIO;
}
@@ -533,21 +533,21 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(midQ);
/* Update # of requests on wire to server */
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
-
+
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, ("Frame too large received. Length: %d Xid: %d",
receive_len, xid));
rc = -EIO;
} else { /* rcvd frame is ok */
- if (midQ->resp_buf &&
+ if (midQ->resp_buf &&
(midQ->midState == MID_RESPONSE_RECEIVED)) {
iov[0].iov_base = (char *)midQ->resp_buf;
- if(midQ->largeBuf)
+ if (midQ->largeBuf)
*pRespBufType = CIFS_LARGE_BUFFER;
else
*pRespBufType = CIFS_SMALL_BUFFER;
@@ -555,14 +555,14 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
dump_smb(midQ->resp_buf, 80);
/* convert the length into a more usable form */
- if((receive_len > 24) &&
+ if ((receive_len > 24) &&
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(midQ->resp_buf,
- ses->server->mac_signing_key,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
- if(rc) {
- cERROR(1,("Unexpected SMB signature"));
+ if (rc) {
+ cERROR(1, ("Unexpected SMB signature"));
/* BB FIXME add code to kill session */
}
}
@@ -576,19 +576,19 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
sizeof (struct smb_hdr) -
4 /* do not count RFC1001 header */ +
(2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
- BCC(midQ->resp_buf) =
+ BCC(midQ->resp_buf) =
le16_to_cpu(BCC_LE(midQ->resp_buf));
midQ->resp_buf = NULL; /* mark it so will not be freed
by DeleteMidQEntry */
} else {
rc = -EIO;
- cFYI(1,("Bad MID state?"));
+ cFYI(1, ("Bad MID state?"));
}
}
out:
DeleteMidQEntry(midQ);
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
@@ -605,18 +605,18 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
struct mid_q_entry *midQ;
if (ses == NULL) {
- cERROR(1,("Null smb session"));
+ cERROR(1, ("Null smb session"));
return -EIO;
}
- if(ses->server == NULL) {
- cERROR(1,("Null tcp session"));
+ if (ses->server == NULL) {
+ cERROR(1, ("Null tcp session"));
return -EIO;
}
- if(ses->server->tcpStatus == CifsExiting)
+ if (ses->server->tcpStatus == CifsExiting)
return -ENOENT;
- /* Ensure that we do not send more than 50 overlapping requests
+ /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
@@ -624,17 +624,17 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (rc)
return rc;
- /* make sure that we sign in the same order that we send on this socket
+ /* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- down(&ses->server->tcpSem);
+ down(&ses->server->tcpSem);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
up(&ses->server->tcpSem);
/* Update # of requests on wire to server */
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
@@ -645,7 +645,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
DeleteMidQEntry(midQ);
up(&ses->server->tcpSem);
/* Update # of requests on wire to server */
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return -EIO;
}
@@ -664,7 +664,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
#endif
up(&ses->server->tcpSem);
- if(rc < 0)
+ if (rc < 0)
goto out;
if (long_op == -1)
@@ -672,17 +672,17 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
else if (long_op == 2) /* writes past end of file can take loong time */
timeout = 180 * HZ;
else if (long_op == 1)
- timeout = 45 * HZ; /* should be greater than
+ timeout = 45 * HZ; /* should be greater than
servers oplock break timeout (about 43 seconds) */
else
timeout = 15 * HZ;
- /* wait for 15 seconds or until woken up due to response arriving or
+ /* wait for 15 seconds or until woken up due to response arriving or
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
but we still give response a chance to complete */
timeout = 2 * HZ;
- }
+ }
/* No user interrupts in wait - wreaks havoc with performance */
wait_for_response(ses, midQ, timeout, 10 * HZ);
@@ -692,10 +692,10 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
- cERROR(1,("No response for cmd %d mid %d",
+ cERROR(1, ("No response for cmd %d mid %d",
midQ->command, midQ->mid));
- if(midQ->midState == MID_REQUEST_SUBMITTED) {
- if(ses->server->tcpStatus == CifsExiting)
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
else {
ses->server->tcpStatus = CifsNeedReconnect;
@@ -704,9 +704,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
}
if (rc != -EHOSTDOWN) {
- if(midQ->midState == MID_RETRY_NEEDED) {
+ if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1,("marking request for retry"));
+ cFYI(1, ("marking request for retry"));
} else {
rc = -EIO;
}
@@ -714,11 +714,11 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(midQ);
/* Update # of requests on wire to server */
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
-
+
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, ("Frame too large received. Length: %d Xid: %d",
receive_len, xid));
@@ -734,14 +734,14 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
dump_smb(out_buf, 92);
/* convert the length into a more usable form */
- if((receive_len > 24) &&
+ if ((receive_len > 24) &&
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- ses->server->mac_signing_key,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
- if(rc) {
- cERROR(1,("Unexpected SMB signature"));
+ if (rc) {
+ cERROR(1, ("Unexpected SMB signature"));
/* BB FIXME add code to kill session */
}
}
@@ -759,13 +759,13 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
} else {
rc = -EIO;
- cERROR(1,("Bad MID state?"));
+ cERROR(1, ("Bad MID state?"));
}
}
out:
DeleteMidQEntry(midQ);
- atomic_dec(&ses->server->inFlight);
+ atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
@@ -783,7 +783,7 @@ send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
in_buf->Mid = mid;
- down(&ses->server->tcpSem);
+ down(&ses->server->tcpSem);
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
up(&ses->server->tcpSem);
@@ -832,20 +832,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
struct cifsSesInfo *ses;
if (tcon == NULL || tcon->ses == NULL) {
- cERROR(1,("Null smb session"));
+ cERROR(1, ("Null smb session"));
return -EIO;
}
ses = tcon->ses;
- if(ses->server == NULL) {
- cERROR(1,("Null tcp session"));
+ if (ses->server == NULL) {
+ cERROR(1, ("Null tcp session"));
return -EIO;
}
- if(ses->server->tcpStatus == CifsExiting)
+ if (ses->server->tcpStatus == CifsExiting)
return -ENOENT;
- /* Ensure that we do not send more than 50 overlapping requests
+ /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
@@ -853,11 +853,11 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (rc)
return rc;
- /* make sure that we sign in the same order that we send on this socket
+ /* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
- down(&ses->server->tcpSem);
+ down(&ses->server->tcpSem);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
@@ -887,14 +887,14 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
#endif
up(&ses->server->tcpSem);
- if(rc < 0) {
+ if (rc < 0) {
DeleteMidQEntry(midQ);
return rc;
}
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(ses->server->response_q,
- (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
+ (!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
@@ -928,7 +928,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
}
/* Wait 5 seconds for the response. */
- if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) {
+ if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
/* We got the response - restart system call. */
rstart = 1;
}
@@ -939,10 +939,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
- cERROR(1,("No response for cmd %d mid %d",
+ cERROR(1, ("No response for cmd %d mid %d",
midQ->command, midQ->mid));
- if(midQ->midState == MID_REQUEST_SUBMITTED) {
- if(ses->server->tcpStatus == CifsExiting)
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ if (ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
else {
ses->server->tcpStatus = CifsNeedReconnect;
@@ -951,9 +951,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
}
if (rc != -EHOSTDOWN) {
- if(midQ->midState == MID_RETRY_NEEDED) {
+ if (midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
- cFYI(1,("marking request for retry"));
+ cFYI(1, ("marking request for retry"));
} else {
rc = -EIO;
}
@@ -962,7 +962,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
DeleteMidQEntry(midQ);
return rc;
}
-
+
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, ("Frame too large received. Length: %d Xid: %d",
receive_len, xid));
@@ -978,14 +978,14 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
dump_smb(out_buf, 92);
/* convert the length into a more usable form */
- if((receive_len > 24) &&
+ if ((receive_len > 24) &&
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- ses->server->mac_signing_key,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
- if(rc) {
- cERROR(1,("Unexpected SMB signature"));
+ if (rc) {
+ cERROR(1, ("Unexpected SMB signature"));
/* BB FIXME add code to kill session */
}
}
@@ -1003,7 +1003,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
} else {
rc = -EIO;
- cERROR(1,("Bad MID state?"));
+ cERROR(1, ("Bad MID state?"));
}
}
DeleteMidQEntry(midQ);
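/*
 * Illustrative sketch, not kernel code: wait_for_response() above keeps
 * waiting past a request's own timeout as long as the server has answered
 * *some* request recently, and only fails once the connection has been
 * silent too long.  A single-threaded model of that policy; poll_once(),
 * the one-second polling interval and the two limits are stand-ins, not
 * the kernel's jiffies arithmetic.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct waiter {
	time_t issued;    /* when this request was sent */
	time_t last_rx;   /* last time any reply arrived from the server */
	int    answered;  /* set once our own reply shows up */
};

/* returns 0 on success, -1 if the server looks dead */
static int wait_for_reply(struct waiter *w, int req_timeout, int idle_limit,
			  void (*poll_once)(struct waiter *))
{
	for (;;) {
		poll_once(w);
		if (w->answered)
			return 0;
		/* fail only when our budget is spent AND nothing at all has
		 * come back from the server for idle_limit seconds */
		if (time(NULL) - w->issued >= req_timeout &&
		    time(NULL) - w->last_rx > idle_limit)
			return -1;
		sleep(1);  /* server slow but alive: keep extending the wait */
	}
}

static void fake_poll(struct waiter *w)
{
	w->last_rx = time(NULL);              /* other replies keep arriving */
	if (time(NULL) - w->issued >= 2)
		w->answered = 1;              /* ours arrives after ~2s */
}

int main(void)
{
	struct waiter w = { time(NULL), time(NULL), 0 };

	printf("rc = %d\n", wait_for_reply(&w, 1, 10, fake_poll));
	return 0;
}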
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 18fcec1..f61e433 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/xattr.c
*
- * Copyright (c) International Business Machines Corp., 2003
+ * Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -37,50 +37,52 @@
#define XATTR_TRUSTED_PREFIX_LEN 8
#define XATTR_SECURITY_PREFIX_LEN 9
/* BB need to add server (Samba e.g) support for security and trusted prefix */
-
-int cifs_removexattr(struct dentry * direntry, const char * ea_name)
+
+int cifs_removexattr(struct dentry *direntry, const char *ea_name)
{
int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
+ struct super_block *sb;
+ char *full_path;
+
+ if (direntry == NULL)
return -EIO;
- if(direntry->d_inode == NULL)
+ if (direntry->d_inode == NULL)
return -EIO;
sb = direntry->d_inode->i_sb;
- if(sb == NULL)
+ if (sb == NULL)
return -EIO;
xid = GetXid();
-
+
cifs_sb = CIFS_SB(sb);
pTcon = cifs_sb->tcon;
-
+
full_path = build_path_from_dentry(direntry);
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)
- && (strncmp(ea_name,CIFS_XATTR_OS2_PREFIX,4))) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+ if (ea_name == NULL) {
+ cFYI(1, ("Null xattr names not supported"));
+ } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
+ && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
+ cFYI(1,
+ ("illegal xattr request %s (only user namespace supported)",
+ ea_name));
/* BB what if no namespace prefix? */
/* Should we just pass them to server, except for
system and perhaps security prefixes? */
} else {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto remove_ea_exit;
- ea_name+=5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
+ ea_name += 5; /* skip past user. prefix */
+ rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
(__u16)0, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
@@ -91,23 +93,23 @@ remove_ea_exit:
return rc;
}
-int cifs_setxattr(struct dentry * direntry, const char * ea_name,
- const void * ea_value, size_t value_size, int flags)
+int cifs_setxattr(struct dentry *direntry, const char *ea_name,
+ const void *ea_value, size_t value_size, int flags)
{
int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
+ struct super_block *sb;
+ char *full_path;
- if(direntry == NULL)
+ if (direntry == NULL)
return -EIO;
- if(direntry->d_inode == NULL)
+ if (direntry->d_inode == NULL)
return -EIO;
sb = direntry->d_inode->i_sb;
- if(sb == NULL)
+ if (sb == NULL)
return -EIO;
xid = GetXid();
@@ -115,7 +117,7 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
pTcon = cifs_sb->tcon;
full_path = build_path_from_dentry(direntry);
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
@@ -123,67 +125,69 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
/* return alt name if available as pseudo attr */
/* if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
+ search server for EAs or streams to
returns as xattrs */
- if(value_size > MAX_EA_VALUE_SIZE) {
- cFYI(1,("size of EA value too large"));
+ if (value_size > MAX_EA_VALUE_SIZE) {
+ cFYI(1, ("size of EA value too large"));
kfree(full_path);
FreeXid(xid);
return -EOPNOTSUPP;
}
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (ea_name == NULL) {
+ cFYI(1, ("Null xattr names not supported"));
+ } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
- if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
- cFYI(1,("attempt to set cifs inode metadata"));
+ if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+ cFYI(1, ("attempt to set cifs inode metadata"));
}
ea_name += 5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+ rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
ea_name += 4; /* skip past os2. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+ rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
- int temp;
- temp = strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+ int temp;
+ temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
strlen(POSIX_ACL_XATTR_ACCESS));
if (temp == 0) {
#ifdef CONFIG_CIFS_POSIX
- if(sb->s_flags & MS_POSIXACL)
- rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,
- ea_value, (const int)value_size,
- ACL_TYPE_ACCESS,cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ if (sb->s_flags & MS_POSIXACL)
+ rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+ ea_value, (const int)value_size,
+ ACL_TYPE_ACCESS, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1,("set POSIX ACL rc %d",rc));
+ cFYI(1, ("set POSIX ACL rc %d", rc));
#else
- cFYI(1,("set POSIX ACL not supported"));
+ cFYI(1, ("set POSIX ACL not supported"));
#endif
- } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
+ } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
+ strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
- if(sb->s_flags & MS_POSIXACL)
- rc = CIFSSMBSetPosixACL(xid, pTcon,full_path,
- ea_value, (const int)value_size,
+ if (sb->s_flags & MS_POSIXACL)
+ rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+ ea_value, (const int)value_size,
ACL_TYPE_DEFAULT, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1,("set POSIX default ACL rc %d",rc));
+ cFYI(1, ("set POSIX default ACL rc %d", rc));
#else
- cFYI(1,("set default POSIX ACL not supported"));
+ cFYI(1, ("set default POSIX ACL not supported"));
#endif
} else {
- cFYI(1,("illegal xattr request %s (only user namespace supported)",ea_name));
+ cFYI(1, ("illegal xattr request %s (only user namespace"
+ " supported)", ea_name));
/* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for
+ /* Should we just pass them to server, except for
system and perhaps security prefixes? */
}
}
@@ -195,23 +199,23 @@ set_ea_exit:
return rc;
}
-ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
- void * ea_value, size_t buf_size)
+ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
+ void *ea_value, size_t buf_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
+ struct super_block *sb;
+ char *full_path;
- if(direntry == NULL)
+ if (direntry == NULL)
return -EIO;
- if(direntry->d_inode == NULL)
+ if (direntry->d_inode == NULL)
return -EIO;
sb = direntry->d_inode->i_sb;
- if(sb == NULL)
+ if (sb == NULL)
return -EIO;
xid = GetXid();
@@ -220,42 +224,42 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
pTcon = cifs_sb->tcon;
full_path = build_path_from_dentry(direntry);
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5) == 0) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (ea_name == NULL) {
+ cFYI(1, ("Null xattr names not supported"));
+ } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
- if(strncmp(ea_name,CIFS_XATTR_DOS_ATTRIB,14) == 0) {
- cFYI(1,("attempt to query cifs inode metadata"));
+ if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+ cFYI(1, ("attempt to query cifs inode metadata"));
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
- rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+ rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if(strncmp(ea_name, CIFS_XATTR_OS2_PREFIX,4) == 0) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
ea_name += 4; /* skip past os2. prefix */
- rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+ rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+ } else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
#ifdef CONFIG_CIFS_POSIX
- if(sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & MS_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
- ea_value, buf_size, ACL_TYPE_ACCESS,
+ ea_value, buf_size, ACL_TYPE_ACCESS,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
/* else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
__u16 fid;
@@ -272,39 +276,40 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
CIFSSMBClose(xid, pTcon, fid);
}
} */ /* BB enable after fixing up return data */
-
-#else
- cFYI(1,("query POSIX ACL not supported yet"));
+#else
+ cFYI(1, ("query POSIX ACL not supported yet"));
#endif /* CONFIG_CIFS_POSIX */
- } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,
+ } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
- if(sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & MS_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
- ea_value, buf_size, ACL_TYPE_DEFAULT,
+ ea_value, buf_size, ACL_TYPE_DEFAULT,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
-#else
- cFYI(1,("query POSIX default ACL not supported yet"));
+#else
+ cFYI(1, ("query POSIX default ACL not supported yet"));
#endif
- } else if(strncmp(ea_name,
- CIFS_XATTR_TRUSTED_PREFIX,XATTR_TRUSTED_PREFIX_LEN) == 0) {
- cFYI(1,("Trusted xattr namespace not supported yet"));
- } else if(strncmp(ea_name,
- CIFS_XATTR_SECURITY_PREFIX,XATTR_SECURITY_PREFIX_LEN) == 0) {
- cFYI(1,("Security xattr namespace not supported yet"));
+ } else if (strncmp(ea_name,
+ CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
+ cFYI(1, ("Trusted xattr namespace not supported yet"));
+ } else if (strncmp(ea_name,
+ CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
+ cFYI(1, ("Security xattr namespace not supported yet"));
} else {
- cFYI(1,("illegal xattr name request %s (only user namespace supported)",ea_name));
+ cFYI(1,
+ ("illegal xattr request %s (only user namespace supported)",
+ ea_name));
}
- /* We could add an additional check for streams ie
+ /* We could add an additional check for streams ie
if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
+ search server for EAs or streams to
returns as xattrs */
- if(rc == -EINVAL)
- rc = -EOPNOTSUPP;
+ if (rc == -EINVAL)
+ rc = -EOPNOTSUPP;
get_ea_exit:
kfree(full_path);
@@ -313,34 +318,34 @@ get_ea_exit:
return rc;
}
-ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
+ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
+ struct super_block *sb;
+ char *full_path;
- if(direntry == NULL)
+ if (direntry == NULL)
return -EIO;
- if(direntry->d_inode == NULL)
+ if (direntry->d_inode == NULL)
return -EIO;
sb = direntry->d_inode->i_sb;
- if(sb == NULL)
+ if (sb == NULL)
return -EIO;
cifs_sb = CIFS_SB(sb);
pTcon = cifs_sb->tcon;
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
return -EOPNOTSUPP;
xid = GetXid();
full_path = build_path_from_dentry(direntry);
- if(full_path == NULL) {
+ if (full_path == NULL) {
FreeXid(xid);
return -ENOMEM;
}
@@ -348,11 +353,11 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
/* return alt name if available as pseudo attr */
/* if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
+ search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size,
cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(full_path);
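/*
 * Illustrative sketch, not kernel code: every handler in the xattr.c diff
 * above starts by classifying the attribute name ("user.", "os2.",
 * "system.posix_acl_access", ...) and strips the prefix before forwarding
 * the rest to the server, as the ea_name += 5 lines do.  A standalone
 * version of that dispatch; the prefix table and return values are
 * illustrative.
 */
#include <stdio.h>
#include <string.h>

static const char *classify_xattr(const char *name, const char **rest)
{
	static const struct { const char *prefix; const char *ns; } tbl[] = {
		{ "user.",     "user"     },
		{ "os2.",      "os2"      },
		{ "trusted.",  "trusted"  },
		{ "security.", "security" },
	};
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		size_t len = strlen(tbl[i].prefix);

		if (strncmp(name, tbl[i].prefix, len) == 0) {
			*rest = name + len;  /* skip past the prefix */
			return tbl[i].ns;
		}
	}
	*rest = name;
	return NULL;  /* unsupported namespace */
}

int main(void)
{
	const char *rest;
	const char *ns = classify_xattr("user.comment", &rest);

	printf("namespace=%s attribute=%s\n", ns ? ns : "(none)", rest);
	return 0;
}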
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index fcb88fa..8a23703 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -43,17 +43,12 @@ void coda_cache_enter(struct inode *inode, int mask)
void coda_cache_clear_inode(struct inode *inode)
{
struct coda_inode_info *cii = ITOC(inode);
- cii->c_cached_perm = 0;
+ cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
}
/* remove all acl caches */
void coda_cache_clear_all(struct super_block *sb)
{
- struct coda_sb_info *sbi;
-
- sbi = coda_sbp(sb);
- BUG_ON(!sbi);
-
atomic_inc(&permission_epoch);
}
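/*
 * Illustrative sketch, not kernel code: the cache.c hunk above invalidates
 * one inode's cached permission by backdating its epoch, and invalidates
 * everything at once by bumping the global epoch.  The same idea in plain,
 * single-threaded C; cache_entry and the helper names are illustrative.
 */
#include <stdio.h>

static unsigned int permission_epoch = 1;

struct cache_entry {
	int perm_mask;        /* cached access rights */
	unsigned int epoch;   /* global epoch at the time the cache was filled */
};

static void cache_fill(struct cache_entry *e, int mask)
{
	e->perm_mask = mask;
	e->epoch = permission_epoch;
}

static int cache_valid(const struct cache_entry *e, int mask)
{
	return e->epoch == permission_epoch && (e->perm_mask & mask) == mask;
}

/* per-object invalidation: make the stored epoch unequal to the global one */
static void cache_clear_one(struct cache_entry *e)
{
	e->epoch = permission_epoch - 1;
}

/* global invalidation: every stored epoch becomes stale in O(1) */
static void cache_clear_all(void)
{
	permission_epoch++;
}

int main(void)
{
	struct cache_entry e;

	cache_fill(&e, 4);
	printf("valid=%d\n", cache_valid(&e, 4));  /* 1 */
	cache_clear_all();
	printf("valid=%d\n", cache_valid(&e, 4));  /* 0 */
	cache_clear_one(&e);                       /* already stale; harmless */
	return 0;
}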
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 28c8727..a7a7809 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -55,11 +55,6 @@ static int coda_set_inode(struct inode *inode, void *data)
return 0;
}
-static int coda_fail_inode(struct inode *inode, void *data)
-{
- return -1;
-}
-
struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid,
struct coda_vattr * attr)
{
@@ -141,7 +136,7 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
return NULL;
}
- inode = iget5_locked(sb, hash, coda_test_inode, coda_fail_inode, fid);
+ inode = ilookup5(sb, hash, coda_test_inode, fid);
if ( !inode )
return NULL;
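/*
 * Illustrative sketch, not kernel code: the cnode.c hunk above replaces a
 * "create-but-always-fail" iget5_locked() call with ilookup5(), i.e. a pure
 * lookup that never allocates a new inode.  The distinction over a toy
 * keyed list; the list, keys and helper names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned long key; struct node *next; };
static struct node *table;

/* lookup-or-create: allocates when the key is missing (iget5_locked-like) */
static struct node *find_or_create(unsigned long key)
{
	struct node *n;

	for (n = table; n; n = n->next)
		if (n->key == key)
			return n;
	n = malloc(sizeof(*n));
	if (!n)
		return NULL;
	n->key = key;
	n->next = table;
	table = n;
	return n;
}

/* lookup-only: returns NULL when the key is missing (ilookup5-like) */
static struct node *find_only(unsigned long key)
{
	struct node *n;

	for (n = table; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	find_or_create(42);
	printf("42 present: %d, 7 present: %d\n",
	       find_only(42) != NULL, find_only(7) != NULL);
	return 0;
}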
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h
index 9e6338f..8ccd5ed 100644
--- a/fs/coda/coda_int.h
+++ b/fs/coda/coda_int.h
@@ -1,12 +1,19 @@
#ifndef _CODA_INT_
#define _CODA_INT_
+struct dentry;
+
extern struct file_system_type coda_fs_type;
+extern unsigned long coda_timeout;
+extern int coda_hard;
+extern int coda_fake_statfs;
void coda_destroy_inodecache(void);
int coda_init_inodecache(void);
int coda_fsync(struct file *coda_file, struct dentry *coda_dentry,
int datasync);
+void coda_sysctl_init(void);
+void coda_sysctl_clean(void);
#endif /* _CODA_INT_ */
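/*
 * Illustrative sketch, not the kernel header: coda_int.h above forward-
 * declares "struct dentry" and exposes a few tunables as extern so the
 * sysctl code and the filesystem proper share a single definition.  The
 * same pattern in a two-file userspace layout, shown inline here; the file
 * names, tunables and struct are illustrative.
 */

/* ---- tunables.h ------------------------------------------------------- */
struct widget;                    /* forward declaration: pointer use only */
extern unsigned long op_timeout;  /* declared here, defined in exactly one .c */
extern int hard_retries;
void tune_widget(struct widget *w);

/* ---- tunables.c ------------------------------------------------------- */
#include <stdio.h>

unsigned long op_timeout = 30;    /* the single definition */
int hard_retries = 1;

struct widget { int id; };        /* full type only where it is needed */

void tune_widget(struct widget *w)
{
	printf("widget %d: timeout=%lu hard=%d\n",
	       w->id, op_timeout, hard_retries);
}

int main(void)
{
	struct widget w = { 7 };

	tune_widget(&w);
	return 0;
}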
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 898a86d..f89ff08 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -25,7 +25,6 @@
#include <linux/coda_psdev.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_cache.h>
-#include <linux/coda_proc.h>
#include "coda_int.h"
@@ -43,15 +42,15 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry);
/* dir file-ops */
-static int coda_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
/* dentry ops */
static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
static int coda_dentry_delete(struct dentry *);
/* support routines */
-static int coda_venus_readdir(struct file *filp, filldir_t filldir,
- void *dirent, struct dentry *dir);
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+ filldir_t filldir);
/* same as fs/bad_inode.c */
static int coda_return_EIO(void)
@@ -87,7 +86,6 @@ const struct file_operations coda_dir_operations = {
.read = generic_read_dir,
.readdir = coda_readdir,
.open = coda_open,
- .flush = coda_flush,
.release = coda_release,
.fsync = coda_fsync,
};
@@ -97,58 +95,45 @@ const struct file_operations coda_dir_operations = {
/* access routines: lookup, readlink, permission */
static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd)
{
- struct inode *res_inode = NULL;
+ struct inode *inode = NULL;
struct CodaFid resfid = { { 0, } };
- int dropme = 0; /* to indicate entry should not be cached */
int type = 0;
int error = 0;
const char *name = entry->d_name.name;
size_t length = entry->d_name.len;
-
- if ( length > CODA_MAXNAMLEN ) {
- printk("name too long: lookup, %s (%*s)\n",
+
+ if (length > CODA_MAXNAMLEN) {
+ printk(KERN_ERR "name too long: lookup, %s (%*s)\n",
coda_i2s(dir), (int)length, name);
return ERR_PTR(-ENAMETOOLONG);
}
+ /* control object, create inode on the fly */
+ if (coda_isroot(dir) && coda_iscontrol(name, length)) {
+ error = coda_cnode_makectl(&inode, dir->i_sb);
+ type = CODA_NOCACHE;
+ goto exit;
+ }
+
lock_kernel();
- /* control object, create inode on the fly */
- if (coda_isroot(dir) && coda_iscontrol(name, length)) {
- error = coda_cnode_makectl(&res_inode, dir->i_sb);
- dropme = 1;
- goto exit;
- }
- error = venus_lookup(dir->i_sb, coda_i2f(dir),
- (const char *)name, length, &type, &resfid);
+ error = venus_lookup(dir->i_sb, coda_i2f(dir), name, length,
+ &type, &resfid);
+ if (!error)
+ error = coda_cnode_make(&inode, &resfid, dir->i_sb);
- res_inode = NULL;
- if (!error) {
- if (type & CODA_NOCACHE) {
- type &= (~CODA_NOCACHE);
- dropme = 1;
- }
+ unlock_kernel();
- error = coda_cnode_make(&res_inode, &resfid, dir->i_sb);
- if (error) {
- unlock_kernel();
- return ERR_PTR(error);
- }
- } else if (error != -ENOENT) {
- unlock_kernel();
+ if (error && error != -ENOENT)
return ERR_PTR(error);
- }
exit:
- entry->d_time = 0;
entry->d_op = &coda_dentry_operations;
- d_add(entry, res_inode);
- if ( dropme ) {
- d_drop(entry);
- coda_flag_inode(res_inode, C_VATTR);
- }
- unlock_kernel();
- return NULL;
+
+ if (inode && (type & CODA_NOCACHE))
+ coda_flag_inode(inode, C_VATTR | C_PURGE);
+
+ return d_splice_alias(inode, entry);
}
@@ -161,8 +146,6 @@ int coda_permission(struct inode *inode, int mask, struct nameidata *nd)
lock_kernel();
- coda_vfs_stat.permission++;
-
if (coda_cache_check(inode, mask))
goto out;
@@ -173,12 +156,11 @@ int coda_permission(struct inode *inode, int mask, struct nameidata *nd)
out:
unlock_kernel();
-
- return error;
+ return error;
}
-static inline void coda_dir_changed(struct inode *dir, int link)
+static inline void coda_dir_update_mtime(struct inode *dir)
{
#ifdef REQUERY_VENUS_FOR_MTIME
/* invalidate the directory cnode's attributes so we refetch the
@@ -186,12 +168,27 @@ static inline void coda_dir_changed(struct inode *dir, int link)
coda_flag_inode(dir, C_VATTR);
#else
/* optimistically we can also act as if our nose bleeds. The
- * granularity of the mtime is coarse anyways so we might actually be
- * right most of the time. Note: we only do this for directories. */
+ * granularity of the mtime is coarse anyways so we might actually be
+ * right most of the time. Note: we only do this for directories. */
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
#endif
- if (link)
- dir->i_nlink += link;
+}
+
+/* we have to wrap inc_nlink/drop_nlink because sometimes userspace uses a
+ * trick to fool GNU find's optimizations. If we can't be sure of the link
+ * (because of volume mount points) we set i_nlink to 1 which forces find
+ * to consider every child as a possible directory. We should also never
+ * see an increment or decrement for deleted directories where i_nlink == 0 */
+static inline void coda_dir_inc_nlink(struct inode *dir)
+{
+ if (dir->i_nlink >= 2)
+ inc_nlink(dir);
+}
+
+static inline void coda_dir_drop_nlink(struct inode *dir)
+{
+ if (dir->i_nlink > 2)
+ drop_nlink(dir);
}
/* creation routines: create, mknod, mkdir, link, symlink */
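/*
 * Illustrative sketch, not kernel code: the comment in the hunk above
 * explains why the new coda_dir_inc_nlink()/coda_dir_drop_nlink() wrappers
 * refuse to adjust a directory link count that is pinned at 1 ("unknown")
 * or would fall below 2.  The heuristic in isolation; plain unsigned ints
 * stand in for inode->i_nlink.
 */
#include <stdio.h>

/* semantics used here: 0 = deleted, 1 = "unknown, treat every child as a
 * possible directory", >= 2 = "." and ".." plus one per child directory
 * (the invariant GNU find's leaf optimization relies on) */
static void dir_inc_nlink(unsigned int *nlink)
{
	if (*nlink >= 2)   /* only count children when the count is trusted */
		(*nlink)++;
}

static void dir_drop_nlink(unsigned int *nlink)
{
	if (*nlink > 2)    /* never fall below "." and ".." for a live dir */
		(*nlink)--;
}

int main(void)
{
	unsigned int trusted = 2, unknown = 1;

	dir_inc_nlink(&trusted);
	dir_inc_nlink(&unknown);
	printf("trusted=%u unknown=%u\n", trusted, unknown);  /* 3 and 1 */

	dir_drop_nlink(&trusted);
	dir_drop_nlink(&unknown);
	printf("trusted=%u unknown=%u\n", trusted, unknown);  /* 2 and 1 */
	return 0;
}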
@@ -205,7 +202,6 @@ static int coda_create(struct inode *dir, struct dentry *de, int mode, struct na
struct coda_vattr attrs;
lock_kernel();
- coda_vfs_stat.create++;
if (coda_isroot(dir) && coda_iscontrol(name, length)) {
unlock_kernel();
@@ -229,10 +225,10 @@ static int coda_create(struct inode *dir, struct dentry *de, int mode, struct na
}
/* invalidate the directory cnode's attributes */
- coda_dir_changed(dir, 0);
+ coda_dir_update_mtime(dir);
unlock_kernel();
d_instantiate(de, inode);
- return 0;
+ return 0;
}
static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
@@ -245,7 +241,6 @@ static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
struct CodaFid newfid;
lock_kernel();
- coda_vfs_stat.mkdir++;
if (coda_isroot(dir) && coda_iscontrol(name, len)) {
unlock_kernel();
@@ -268,12 +263,13 @@ static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
d_drop(de);
return PTR_ERR(inode);
}
-
+
/* invalidate the directory cnode's attributes */
- coda_dir_changed(dir, 1);
+ coda_dir_inc_nlink(dir);
+ coda_dir_update_mtime(dir);
unlock_kernel();
d_instantiate(de, inode);
- return 0;
+ return 0;
}
/* try to make de an entry in dir_inodde linked to source_de */
@@ -286,7 +282,6 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
int error;
lock_kernel();
- coda_vfs_stat.link++;
if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
unlock_kernel();
@@ -296,16 +291,16 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
error = venus_link(dir_inode->i_sb, coda_i2f(inode),
coda_i2f(dir_inode), (const char *)name, len);
- if (error) {
+ if (error) {
d_drop(de);
goto out;
}
- coda_dir_changed(dir_inode, 0);
+ coda_dir_update_mtime(dir_inode);
atomic_inc(&inode->i_count);
d_instantiate(de, inode);
inc_nlink(inode);
-
+
out:
unlock_kernel();
return(error);
@@ -318,10 +313,9 @@ static int coda_symlink(struct inode *dir_inode, struct dentry *de,
const char *name = de->d_name.name;
int len = de->d_name.len;
int symlen;
- int error=0;
-
+ int error = 0;
+
lock_kernel();
- coda_vfs_stat.symlink++;
if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
unlock_kernel();
@@ -336,18 +330,18 @@ static int coda_symlink(struct inode *dir_inode, struct dentry *de,
/*
* This entry is now negative. Since we do not create
- * an inode for the entry we have to drop it.
+ * an inode for the entry we have to drop it.
*/
d_drop(de);
- error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len,
+ error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len,
symname, symlen);
/* mtime is no good anymore */
if ( !error )
- coda_dir_changed(dir_inode, 0);
+ coda_dir_update_mtime(dir_inode);
unlock_kernel();
- return error;
+ return error;
}
/* destruction routines: unlink, rmdir */
@@ -358,79 +352,70 @@ int coda_unlink(struct inode *dir, struct dentry *de)
int len = de->d_name.len;
lock_kernel();
- coda_vfs_stat.unlink++;
- error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
- if ( error ) {
+ error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
+ if ( error ) {
unlock_kernel();
- return error;
- }
+ return error;
+ }
- coda_dir_changed(dir, 0);
+ coda_dir_update_mtime(dir);
drop_nlink(de->d_inode);
unlock_kernel();
-
- return 0;
+ return 0;
}
int coda_rmdir(struct inode *dir, struct dentry *de)
{
const char *name = de->d_name.name;
int len = de->d_name.len;
- int error;
+ int error;
lock_kernel();
- coda_vfs_stat.rmdir++;
- if (!d_unhashed(de)) {
- unlock_kernel();
- return -EBUSY;
- }
error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
+ if (!error) {
+ /* VFS may delete the child */
+ if (de->d_inode)
+ de->d_inode->i_nlink = 0;
- if ( error ) {
- unlock_kernel();
- return error;
- }
-
- coda_dir_changed(dir, -1);
- drop_nlink(de->d_inode);
- d_delete(de);
+ /* fix the link count of the parent */
+ coda_dir_drop_nlink(dir);
+ coda_dir_update_mtime(dir);
+ }
unlock_kernel();
-
- return 0;
+ return error;
}
/* rename */
-static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- const char *old_name = old_dentry->d_name.name;
- const char *new_name = new_dentry->d_name.name;
+ const char *old_name = old_dentry->d_name.name;
+ const char *new_name = new_dentry->d_name.name;
int old_length = old_dentry->d_name.len;
int new_length = new_dentry->d_name.len;
- int link_adjust = 0;
- int error;
+ int error;
lock_kernel();
- coda_vfs_stat.rename++;
- error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
- coda_i2f(new_dir), old_length, new_length,
+ error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
+ coda_i2f(new_dir), old_length, new_length,
(const char *) old_name, (const char *)new_name);
- if ( !error ) {
+ if ( !error ) {
if ( new_dentry->d_inode ) {
- if ( S_ISDIR(new_dentry->d_inode->i_mode) )
- link_adjust = 1;
-
- coda_dir_changed(old_dir, -link_adjust);
- coda_dir_changed(new_dir, link_adjust);
+ if ( S_ISDIR(new_dentry->d_inode->i_mode) ) {
+ coda_dir_drop_nlink(old_dir);
+ coda_dir_inc_nlink(new_dir);
+ }
+ coda_dir_update_mtime(old_dir);
+ coda_dir_update_mtime(new_dir);
coda_flag_inode(new_dentry->d_inode, C_VATTR);
} else {
coda_flag_inode(old_dir, C_VATTR);
coda_flag_inode(new_dir, C_VATTR);
- }
+ }
}
unlock_kernel();
@@ -439,44 +424,41 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
/* file operations for directories */
-int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
+int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
{
- struct dentry *coda_dentry = coda_file->f_path.dentry;
struct coda_file_info *cfi;
struct file *host_file;
- struct inode *host_inode;
int ret;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
- coda_vfs_stat.readdir++;
-
- host_inode = host_file->f_path.dentry->d_inode;
- mutex_lock(&host_inode->i_mutex);
- host_file->f_pos = coda_file->f_pos;
-
- if (!host_file->f_op->readdir) {
- /* Venus: we must read Venus dirents from the file */
- ret = coda_venus_readdir(host_file, filldir, dirent, coda_dentry);
- } else {
- /* potemkin case: we were handed a directory inode. */
- /* Yuk, we can't call vfs_readdir because we are already
- * holding the inode semaphore. */
- ret = -ENOTDIR;
- if (!host_file->f_op || !host_file->f_op->readdir)
- goto out;
+ if (!host_file->f_op)
+ return -ENOTDIR;
+
+ if (host_file->f_op->readdir)
+ {
+ /* potemkin case: we were handed a directory inode.
+ * We can't use vfs_readdir because we have to keep the file
+ * position in sync between the coda_file and the host_file.
+ * and as such we need grab the inode mutex. */
+ struct inode *host_inode = host_file->f_path.dentry->d_inode;
+
+ mutex_lock(&host_inode->i_mutex);
+ host_file->f_pos = coda_file->f_pos;
ret = -ENOENT;
if (!IS_DEADDIR(host_inode)) {
- ret = host_file->f_op->readdir(host_file, dirent, filldir);
+ ret = host_file->f_op->readdir(host_file, buf, filldir);
file_accessed(host_file);
}
+
+ coda_file->f_pos = host_file->f_pos;
+ mutex_unlock(&host_inode->i_mutex);
}
-out:
- coda_file->f_pos = host_file->f_pos;
- mutex_unlock(&host_inode->i_mutex);
+ else /* Venus: we must read Venus dirents from a file */
+ ret = coda_venus_readdir(coda_file, buf, filldir);
return ret;
}
@@ -501,57 +483,68 @@ static inline unsigned int CDT2DT(unsigned char cdt)
}
/* support routines */
-static int coda_venus_readdir(struct file *filp, filldir_t filldir,
- void *dirent, struct dentry *dir)
+static int coda_venus_readdir(struct file *coda_file, void *buf,
+ filldir_t filldir)
{
int result = 0; /* # of entries returned */
+ struct coda_file_info *cfi;
+ struct coda_inode_info *cii;
+ struct file *host_file;
+ struct dentry *de;
struct venus_dirent *vdir;
unsigned long vdir_size =
(unsigned long)(&((struct venus_dirent *)0)->d_name);
unsigned int type;
struct qstr name;
ino_t ino;
- int ret, i;
+ int ret;
+
+ cfi = CODA_FTOC(coda_file);
+ BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+ host_file = cfi->cfi_container;
+
+ de = coda_file->f_path.dentry;
+ cii = ITOC(de->d_inode);
vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
if (!vdir) return -ENOMEM;
- i = filp->f_pos;
- switch(i) {
- case 0:
- ret = filldir(dirent, ".", 1, 0, dir->d_inode->i_ino, DT_DIR);
- if (ret < 0) break;
+ if (coda_file->f_pos == 0) {
+ ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR);
+ if (ret < 0)
+ goto out;
result++;
- filp->f_pos++;
- /* fallthrough */
- case 1:
- ret = filldir(dirent, "..", 2, 1, dir->d_parent->d_inode->i_ino, DT_DIR);
- if (ret < 0) break;
+ coda_file->f_pos++;
+ }
+ if (coda_file->f_pos == 1) {
+ ret = filldir(buf, "..", 2, 1, de->d_parent->d_inode->i_ino, DT_DIR);
+ if (ret < 0)
+ goto out;
result++;
- filp->f_pos++;
- /* fallthrough */
- default:
+ coda_file->f_pos++;
+ }
while (1) {
/* read entries from the directory file */
- ret = kernel_read(filp, filp->f_pos - 2, (char *)vdir,
+ ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir,
sizeof(*vdir));
if (ret < 0) {
- printk("coda_venus_readdir: read dir failed %d\n", ret);
+ printk(KERN_ERR "coda readdir: read dir %s failed %d\n",
+ coda_f2s(&cii->c_fid), ret);
break;
}
if (ret == 0) break; /* end of directory file reached */
/* catch truncated reads */
if (ret < vdir_size || ret < vdir_size + vdir->d_namlen) {
- printk("coda_venus_readdir: short read: %ld\n",
- filp->f_path.dentry->d_inode->i_ino);
+ printk(KERN_ERR "coda readdir: short read on %s\n",
+ coda_f2s(&cii->c_fid));
ret = -EBADF;
break;
}
/* validate whether the directory file actually makes sense */
if (vdir->d_reclen < vdir_size + vdir->d_namlen) {
- printk("coda_venus_readdir: Invalid dir: %ld\n",
- filp->f_path.dentry->d_inode->i_ino);
+ printk(KERN_ERR "coda readdir: invalid dir %s\n",
+ coda_f2s(&cii->c_fid));
ret = -EBADF;
break;
}
@@ -570,21 +563,21 @@ static int coda_venus_readdir(struct file *filp, filldir_t filldir,
* userspace doesn't have to worry about breaking
* getcwd by having mismatched inode numbers for
* internal volume mountpoints. */
- ino = find_inode_number(dir, &name);
+ ino = find_inode_number(de, &name);
if (!ino) ino = vdir->d_fileno;
type = CDT2DT(vdir->d_type);
- ret = filldir(dirent, name.name, name.len, filp->f_pos,
- ino, type);
+ ret = filldir(buf, name.name, name.len,
+ coda_file->f_pos, ino, type);
/* failure means no space for filling in this round */
if (ret < 0) break;
result++;
}
/* we'll always have progress because d_reclen is unsigned and
* we've already established it is non-zero. */
- filp->f_pos += vdir->d_reclen;
+ coda_file->f_pos += vdir->d_reclen;
}
- }
+out:
kfree(vdir);
return result ? result : ret;
}
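
Illustration only, not part of the patch: the reworked coda_venus_readdir() keeps the usual readdir offset convention, where f_pos 0 and 1 are the synthetic "." and ".." entries and every later position is read from the Venus directory file at offset f_pos - 2. The standalone C sketch below shows that cookie arithmetic; the emit() callback and the fixed record layout are invented for illustration and are not taken from the patch.

#include <stdio.h>
#include <stddef.h>

struct rec {
	unsigned short	reclen;		/* distance to the next record, > 0 */
	char		name[30];
};

static int emit(void *buf, const char *name, long pos)
{
	(void)buf;
	printf("pos %ld: %s\n", pos, name);
	return 0;			/* 0 = keep going, < 0 = buffer full */
}

static void sketch_readdir(const char *dirfile, size_t nbytes,
			   long *pos, void *buf)
{
	if (*pos == 0) {
		if (emit(buf, ".", *pos) < 0)
			return;
		(*pos)++;
	}
	if (*pos == 1) {
		if (emit(buf, "..", *pos) < 0)
			return;
		(*pos)++;
	}
	while ((size_t)(*pos - 2) < nbytes) {
		const struct rec *r = (const struct rec *)(dirfile + (*pos - 2));

		if (emit(buf, r->name, *pos) < 0)
			break;		/* no room; resume at this pos next call */
		*pos += r->reclen;	/* reclen > 0 guarantees forward progress */
	}
}
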
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 99dbe86..29137ff 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -22,14 +22,9 @@
#include <linux/coda_linux.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_psdev.h>
-#include <linux/coda_proc.h>
#include "coda_int.h"
-/* if CODA_STORE fails with EOPNOTSUPP, venus clearly doesn't support
- * CODA_STORE/CODA_RELEASE and we fall back on using the CODA_CLOSE upcall */
-static int use_coda_close;
-
static ssize_t
coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos)
{
@@ -134,8 +129,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
unsigned short coda_flags = coda_flags_to_cflags(flags);
struct coda_file_info *cfi;
- coda_vfs_stat.open++;
-
cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
if (!cfi)
return -ENOMEM;
@@ -143,8 +136,11 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
lock_kernel();
error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
- &host_file);
- if (error || !host_file) {
+ &host_file);
+ if (!host_file)
+ error = -EIO;
+
+ if (error) {
kfree(cfi);
unlock_kernel();
return error;
@@ -163,49 +159,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
return 0;
}
-int coda_flush(struct file *coda_file, fl_owner_t id)
-{
- unsigned short flags = coda_file->f_flags & ~O_EXCL;
- unsigned short coda_flags = coda_flags_to_cflags(flags);
- struct coda_file_info *cfi;
- struct inode *coda_inode;
- int err = 0, fcnt;
-
- lock_kernel();
-
- coda_vfs_stat.flush++;
-
- /* last close semantics */
- fcnt = file_count(coda_file);
- if (fcnt > 1)
- goto out;
-
- /* No need to make an upcall when we have not made any modifications
- * to the file */
- if ((coda_file->f_flags & O_ACCMODE) == O_RDONLY)
- goto out;
-
- if (use_coda_close)
- goto out;
-
- cfi = CODA_FTOC(coda_file);
- BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
-
- coda_inode = coda_file->f_path.dentry->d_inode;
-
- err = venus_store(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
- coda_file->f_uid);
-
- if (err == -EOPNOTSUPP) {
- use_coda_close = 1;
- err = 0;
- }
-
-out:
- unlock_kernel();
- return err;
-}
-
int coda_release(struct inode *coda_inode, struct file *coda_file)
{
unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
@@ -216,23 +169,12 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
int err = 0;
lock_kernel();
- coda_vfs_stat.release++;
-
- if (!use_coda_close) {
- err = venus_release(coda_inode->i_sb, coda_i2f(coda_inode),
- coda_flags);
- if (err == -EOPNOTSUPP) {
- use_coda_close = 1;
- err = 0;
- }
- }
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
- if (use_coda_close)
- err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
- coda_flags, coda_file->f_uid);
+ err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
+ coda_flags, coda_file->f_uid);
host_inode = cfi->cfi_container->f_path.dentry->d_inode;
cii = ITOC(coda_inode);
@@ -249,7 +191,10 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
coda_file->private_data = NULL;
unlock_kernel();
- return err;
+
+ /* VFS fput ignores the return value from file_operations->release, so
+ * there is no use returning an error here */
+ return 0;
}
int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
@@ -268,8 +213,6 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
- coda_vfs_stat.fsync++;
-
if (host_file->f_op && host_file->f_op->fsync) {
host_dentry = host_file->f_path.dentry;
host_inode = host_dentry->d_inode;
@@ -293,7 +236,6 @@ const struct file_operations coda_file_operations = {
.write = coda_file_write,
.mmap = coda_file_mmap,
.open = coda_open,
- .flush = coda_flush,
.release = coda_release,
.fsync = coda_fsync,
.splice_read = coda_file_splice_read,
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index dbff1bd..342f4e0 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -64,13 +64,13 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
int coda_init_inodecache(void)
{
coda_inode_cachep = kmem_cache_create("coda_inode_cache",
sizeof(struct coda_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (coda_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -83,7 +83,7 @@ void coda_destroy_inodecache(void)
static int coda_remount(struct super_block *sb, int *flags, char *data)
{
- *flags |= MS_NODIRATIME;
+ *flags |= MS_NOATIME;
return 0;
}
@@ -141,11 +141,10 @@ static int get_device_index(struct coda_mount_data *data)
static int coda_fill_super(struct super_block *sb, void *data, int silent)
{
- struct inode *root = NULL;
- struct coda_sb_info *sbi = NULL;
+ struct inode *root = NULL;
struct venus_comm *vc = NULL;
struct CodaFid fid;
- int error;
+ int error;
int idx;
idx = get_device_index((struct coda_mount_data *) data);
@@ -167,21 +166,14 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return -EBUSY;
}
- sbi = kmalloc(sizeof(struct coda_sb_info), GFP_KERNEL);
- if(!sbi) {
- return -ENOMEM;
- }
-
vc->vc_sb = sb;
- sbi->sbi_vcomm = vc;
-
- sb->s_fs_info = sbi;
- sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
- sb->s_blocksize = 1024; /* XXXXX what do we put here?? */
- sb->s_blocksize_bits = 10;
- sb->s_magic = CODA_SUPER_MAGIC;
- sb->s_op = &coda_super_operations;
+ sb->s_fs_info = vc;
+ sb->s_flags |= MS_NOATIME;
+ sb->s_blocksize = 4096; /* XXXXX what do we put here?? */
+ sb->s_blocksize_bits = 12;
+ sb->s_magic = CODA_SUPER_MAGIC;
+ sb->s_op = &coda_super_operations;
/* get root fid from Venus: this needs the root inode */
error = venus_rootfid(sb, &fid);
@@ -207,26 +199,20 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return 0;
error:
- if (sbi) {
- kfree(sbi);
- if(vc)
- vc->vc_sb = NULL;
- }
if (root)
- iput(root);
+ iput(root);
+ if (vc)
+ vc->vc_sb = NULL;
- return -EINVAL;
+ return -EINVAL;
}
static void coda_put_super(struct super_block *sb)
{
- struct coda_sb_info *sbi;
-
- sbi = coda_sbp(sb);
- sbi->sbi_vcomm->vc_sb = NULL;
+ coda_vcp(sb)->vc_sb = NULL;
+ sb->s_fs_info = NULL;
printk("Coda: Bye bye.\n");
- kfree(sbi);
}
static void coda_clear_inode(struct inode *inode)
@@ -296,7 +282,7 @@ static int coda_statfs(struct dentry *dentry, struct kstatfs *buf)
/* and fill in the rest */
buf->f_type = CODA_SUPER_MAGIC;
- buf->f_bsize = 1024;
+ buf->f_bsize = 4096;
buf->f_namelen = CODA_MAXNAMLEN;
return 0;
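
Illustration only, not part of the patch: with struct coda_sb_info gone, sb->s_fs_info now holds the venus_comm directly, so the coda_vcp() accessor used above is presumably nothing more than a cast. Its real definition lives in a header outside this diff; the stub types below stand in for the kernel ones.

/* Assumed shape of the accessor; struct definitions are stubs. */
struct venus_comm { int vc_inuse; void *vc_sb; };
struct super_block { void *s_fs_info; };

static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
	return (struct venus_comm *)sb->s_fs_info;
}
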
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 803aacf..dcc6aea 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -45,12 +45,9 @@
#include <linux/coda_linux.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_psdev.h>
-#include <linux/coda_proc.h>
#include "coda_int.h"
-#define upc_free(r) kfree(r)
-
/* statistics */
int coda_hard; /* allows signals during upcalls */
unsigned long coda_timeout = 30; /* .. secs, then signals will dequeue */
@@ -195,7 +192,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
if (req->uc_opcode == CODA_OPEN_BY_FD) {
struct coda_open_by_fd_out *outp =
(struct coda_open_by_fd_out *)req->uc_data;
- outp->fh = fget(outp->fd);
+ if (!outp->oh.result)
+ outp->fh = fget(outp->fd);
}
wake_up(&req->uc_sleep);
@@ -263,7 +261,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
}
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
- upc_free(req);
+ kfree(req);
out:
unlock_kernel();
return (count ? count : retval);
@@ -271,71 +269,70 @@ out:
static int coda_psdev_open(struct inode * inode, struct file * file)
{
- struct venus_comm *vcp;
- int idx;
+ struct venus_comm *vcp;
+ int idx, err;
- lock_kernel();
idx = iminor(inode);
- if(idx >= MAX_CODADEVS) {
- unlock_kernel();
+ if (idx < 0 || idx >= MAX_CODADEVS)
return -ENODEV;
- }
+ lock_kernel();
+
+ err = -EBUSY;
vcp = &coda_comms[idx];
- if(vcp->vc_inuse) {
- unlock_kernel();
- return -EBUSY;
- }
-
- if (!vcp->vc_inuse++) {
+ if (!vcp->vc_inuse) {
+ vcp->vc_inuse++;
+
INIT_LIST_HEAD(&vcp->vc_pending);
INIT_LIST_HEAD(&vcp->vc_processing);
init_waitqueue_head(&vcp->vc_waitq);
vcp->vc_sb = NULL;
vcp->vc_seq = 0;
+
+ file->private_data = vcp;
+ err = 0;
}
-
- file->private_data = vcp;
unlock_kernel();
- return 0;
+ return err;
}
static int coda_psdev_release(struct inode * inode, struct file * file)
{
- struct venus_comm *vcp = (struct venus_comm *) file->private_data;
- struct upc_req *req, *tmp;
+ struct venus_comm *vcp = (struct venus_comm *) file->private_data;
+ struct upc_req *req, *tmp;
- lock_kernel();
- if ( !vcp->vc_inuse ) {
- unlock_kernel();
+ if (!vcp || !vcp->vc_inuse ) {
printk("psdev_release: Not open.\n");
return -1;
}
- if (--vcp->vc_inuse) {
- unlock_kernel();
- return 0;
- }
-
- /* Wakeup clients so they can return. */
+ lock_kernel();
+
+ /* Wakeup clients so they can return. */
list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
+ list_del(&req->uc_chain);
+
/* Async requests need to be freed here */
if (req->uc_flags & REQ_ASYNC) {
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
- upc_free(req);
+ kfree(req);
continue;
}
req->uc_flags |= REQ_ABORT;
wake_up(&req->uc_sleep);
- }
-
- list_for_each_entry(req, &vcp->vc_processing, uc_chain) {
+ }
+
+ list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
+ list_del(&req->uc_chain);
+
req->uc_flags |= REQ_ABORT;
- wake_up(&req->uc_sleep);
- }
+ wake_up(&req->uc_sleep);
+ }
+ file->private_data = NULL;
+ vcp->vc_inuse--;
unlock_kernel();
return 0;
}
@@ -376,21 +373,20 @@ out:
return err;
}
-
-MODULE_AUTHOR("Peter J. Braam <braam@cs.cmu.edu>");
+MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
+MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
+MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
MODULE_LICENSE("GPL");
+#ifdef CONFIG_CODA_FS_OLD_API
+MODULE_VERSION("5.3.21");
+#else
+MODULE_VERSION("6.6");
+#endif
static int __init init_coda(void)
{
int status;
int i;
- printk(KERN_INFO "Coda Kernel/Venus communications, "
-#ifdef CONFIG_CODA_FS_OLD_API
- "v5.3.20"
-#else
- "v6.0.0"
-#endif
- ", coda@cs.cmu.edu\n");
status = coda_init_inodecache();
if (status)
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 76e00a6..4513b7258 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -20,7 +20,6 @@
#include <linux/coda_linux.h>
#include <linux/coda_psdev.h>
#include <linux/coda_fs_i.h>
-#include <linux/coda_proc.h>
static int coda_symlink_filler(struct file *file, struct page *page)
{
@@ -32,7 +31,6 @@ static int coda_symlink_filler(struct file *file, struct page *page)
lock_kernel();
cii = ITOC(inode);
- coda_vfs_stat.follow_link++;
error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
unlock_kernel();
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index c57a1fa..81b7771 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -5,181 +5,14 @@
*
* Carnegie Mellon encourages users to contribute improvements to
* the Coda project. Contact Peter Braam (coda@cs.cmu.edu).
- *
- * CODA operation statistics
- * (c) March, 1998 Zhanyong Wan <zhanyong.wan@yale.edu>
- *
*/
-#include <linux/time.h>
-#include <linux/mm.h>
#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-#include <asm/uaccess.h>
-#include <linux/utsname.h>
-#include <linux/module.h>
-#include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_psdev.h>
-#include <linux/coda_cache.h>
-#include <linux/coda_proc.h>
+#include "coda_int.h"
static struct ctl_table_header *fs_table_header;
-#define CODA_TIMEOUT 3 /* timeout on upcalls to become intrble */
-#define CODA_HARD 5 /* mount type "hard" or "soft" */
-#define CODA_VFS 6 /* vfs statistics */
-#define CODA_CACHE_INV 9 /* cache invalidation statistics */
-#define CODA_FAKE_STATFS 10 /* don't query venus for actual cache usage */
-
-struct coda_vfs_stats coda_vfs_stat;
-static struct coda_cache_inv_stats coda_cache_inv_stat;
-
-static void reset_coda_vfs_stats( void )
-{
- memset( &coda_vfs_stat, 0, sizeof( coda_vfs_stat ) );
-}
-
-static void reset_coda_cache_inv_stats( void )
-{
- memset( &coda_cache_inv_stat, 0, sizeof( coda_cache_inv_stat ) );
-}
-
-static int do_reset_coda_vfs_stats( ctl_table * table, int write,
- struct file * filp, void __user * buffer,
- size_t * lenp, loff_t * ppos )
-{
- if ( write ) {
- reset_coda_vfs_stats();
-
- *ppos += *lenp;
- } else {
- *lenp = 0;
- }
-
- return 0;
-}
-
-static int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
- struct file * filp,
- void __user * buffer,
- size_t * lenp, loff_t * ppos )
-{
- if ( write ) {
- reset_coda_cache_inv_stats();
-
- *ppos += *lenp;
- } else {
- *lenp = 0;
- }
-
- return 0;
-}
-
-static int proc_vfs_stats_show(struct seq_file *m, void *v)
-{
- struct coda_vfs_stats * ps = & coda_vfs_stat;
-
- seq_printf(m,
- "Coda VFS statistics\n"
- "===================\n\n"
- "File Operations:\n"
- "\topen\t\t%9d\n"
- "\tflush\t\t%9d\n"
- "\trelease\t\t%9d\n"
- "\tfsync\t\t%9d\n\n"
- "Dir Operations:\n"
- "\treaddir\t\t%9d\n\n"
- "Inode Operations\n"
- "\tcreate\t\t%9d\n"
- "\tlookup\t\t%9d\n"
- "\tlink\t\t%9d\n"
- "\tunlink\t\t%9d\n"
- "\tsymlink\t\t%9d\n"
- "\tmkdir\t\t%9d\n"
- "\trmdir\t\t%9d\n"
- "\trename\t\t%9d\n"
- "\tpermission\t%9d\n",
-
- /* file operations */
- ps->open,
- ps->flush,
- ps->release,
- ps->fsync,
-
- /* dir operations */
- ps->readdir,
-
- /* inode operations */
- ps->create,
- ps->lookup,
- ps->link,
- ps->unlink,
- ps->symlink,
- ps->mkdir,
- ps->rmdir,
- ps->rename,
- ps->permission);
- return 0;
-}
-
-static int proc_cache_inv_stats_show(struct seq_file *m, void *v)
-{
- struct coda_cache_inv_stats * ps = & coda_cache_inv_stat;
-
- seq_printf(m,
- "Coda cache invalidation statistics\n"
- "==================================\n\n"
- "flush\t\t%9d\n"
- "purge user\t%9d\n"
- "zap_dir\t\t%9d\n"
- "zap_file\t%9d\n"
- "zap_vnode\t%9d\n"
- "purge_fid\t%9d\n"
- "replace\t\t%9d\n",
- ps->flush,
- ps->purge_user,
- ps->zap_dir,
- ps->zap_file,
- ps->zap_vnode,
- ps->purge_fid,
- ps->replace );
- return 0;
-}
-
-static int proc_vfs_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_vfs_stats_show, NULL);
-}
-
-static int proc_cache_inv_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_cache_inv_stats_show, NULL);
-}
-
-static const struct file_operations proc_vfs_stats_fops = {
- .owner = THIS_MODULE,
- .open = proc_vfs_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_cache_inv_stats_fops = {
- .owner = THIS_MODULE,
- .open = proc_cache_inv_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static ctl_table coda_table[] = {
{
.ctl_name = CTL_UNNUMBERED,
@@ -199,22 +32,6 @@ static ctl_table coda_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "vfs_stats",
- .data = NULL,
- .maxlen = 0,
- .mode = 0644,
- .proc_handler = &do_reset_coda_vfs_stats
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "cache_inv_stats",
- .data = NULL,
- .maxlen = 0,
- .mode = 0644,
- .proc_handler = &do_reset_coda_cache_inv_stats
- },
- {
- .ctl_name = CTL_UNNUMBERED,
.procname = "fake_statfs",
.data = &coda_fake_statfs,
.maxlen = sizeof(int),
@@ -235,59 +52,20 @@ static ctl_table fs_table[] = {
};
-#ifdef CONFIG_PROC_FS
-
-/*
- target directory structure:
- /proc/fs (see linux/fs/proc/root.c)
- /proc/fs/coda
- /proc/fs/coda/{vfs_stats,
-
-*/
-
-static struct proc_dir_entry* proc_fs_coda;
-
-#endif
-
void coda_sysctl_init(void)
{
- reset_coda_vfs_stats();
- reset_coda_cache_inv_stats();
-
-#ifdef CONFIG_PROC_FS
- proc_fs_coda = proc_mkdir("coda", proc_root_fs);
- if (proc_fs_coda) {
- struct proc_dir_entry *pde;
-
- proc_fs_coda->owner = THIS_MODULE;
- pde = create_proc_entry("vfs_stats", 0, proc_fs_coda);
- if (pde)
- pde->proc_fops = &proc_vfs_stats_fops;
- pde = create_proc_entry("cache_inv_stats", 0, proc_fs_coda);
- if (pde)
- pde->proc_fops = &proc_cache_inv_stats_fops;
- }
-#endif
-
#ifdef CONFIG_SYSCTL
if ( !fs_table_header )
fs_table_header = register_sysctl_table(fs_table);
-#endif
+#endif
}
-void coda_sysctl_clean(void)
+void coda_sysctl_clean(void)
{
-
#ifdef CONFIG_SYSCTL
if ( fs_table_header ) {
unregister_sysctl_table(fs_table_header);
fs_table_header = NULL;
}
#endif
-
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("cache_inv_stats", proc_fs_coda);
- remove_proc_entry("vfs_stats", proc_fs_coda);
- remove_proc_entry("coda", proc_root_fs);
-#endif
}
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 5faacdb..cdb4c07 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -35,12 +35,10 @@
#include <linux/coda_psdev.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_cache.h>
-#include <linux/coda_proc.h>
-#define upc_alloc() kmalloc(sizeof(struct upc_req), GFP_KERNEL)
-#define upc_free(r) kfree(r)
+#include "coda_int.h"
-static int coda_upcall(struct coda_sb_info *mntinfo, int inSize, int *outSize,
+static int coda_upcall(struct venus_comm *vc, int inSize, int *outSize,
union inputArgs *buffer);
static void *alloc_upcall(int opcode, int size)
@@ -86,13 +84,9 @@ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp)
insize = SIZE(root);
UPARG(CODA_ROOT);
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
- if (error) {
- printk("coda_get_rootfid: error %d\n", error);
- } else {
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error)
*fidp = outp->coda_root.VFid;
- }
CODA_FREE(inp, insize);
return error;
@@ -109,9 +103,9 @@ int venus_getattr(struct super_block *sb, struct CodaFid *fid,
UPARG(CODA_GETATTR);
inp->coda_getattr.VFid = *fid;
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
- *attr = outp->coda_getattr.attr;
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error)
+ *attr = outp->coda_getattr.attr;
CODA_FREE(inp, insize);
return error;
@@ -130,7 +124,7 @@ int venus_setattr(struct super_block *sb, struct CodaFid *fid,
inp->coda_setattr.VFid = *fid;
inp->coda_setattr.attr = *vattr;
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -156,64 +150,18 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid,
memcpy((char *)(inp) + offset, name, length);
*((char *)inp + offset + length) = '\0';
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
- *resfid = outp->coda_lookup.VFid;
- *type = outp->coda_lookup.vtype;
-
- CODA_FREE(inp, insize);
- return error;
-}
-
-int venus_store(struct super_block *sb, struct CodaFid *fid, int flags,
- vuid_t uid)
-{
- union inputArgs *inp;
- union outputArgs *outp;
- int insize, outsize, error;
-#ifdef CONFIG_CODA_FS_OLD_API
- struct coda_cred cred = { 0, };
- cred.cr_fsuid = uid;
-#endif
-
- insize = SIZE(store);
- UPARG(CODA_STORE);
-
-#ifdef CONFIG_CODA_FS_OLD_API
- memcpy(&(inp->ih.cred), &cred, sizeof(cred));
-#else
- inp->ih.uid = uid;
-#endif
-
- inp->coda_store.VFid = *fid;
- inp->coda_store.flags = flags;
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
- CODA_FREE(inp, insize);
- return error;
-}
-
-int venus_release(struct super_block *sb, struct CodaFid *fid, int flags)
-{
- union inputArgs *inp;
- union outputArgs *outp;
- int insize, outsize, error;
-
- insize = SIZE(release);
- UPARG(CODA_RELEASE);
-
- inp->coda_release.VFid = *fid;
- inp->coda_release.flags = flags;
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error) {
+ *resfid = outp->coda_lookup.VFid;
+ *type = outp->coda_lookup.vtype;
+ }
CODA_FREE(inp, insize);
return error;
}
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- vuid_t uid)
+ vuid_t uid)
{
union inputArgs *inp;
union outputArgs *outp;
@@ -235,7 +183,7 @@ int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
inp->coda_close.VFid = *fid;
inp->coda_close.flags = flags;
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -251,12 +199,12 @@ int venus_open(struct super_block *sb, struct CodaFid *fid,
insize = SIZE(open_by_fd);
UPARG(CODA_OPEN_BY_FD);
- inp->coda_open.VFid = *fid;
- inp->coda_open.flags = flags;
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ inp->coda_open_by_fd.VFid = *fid;
+ inp->coda_open_by_fd.flags = flags;
- *fh = outp->coda_open_by_fd.fh;
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error)
+ *fh = outp->coda_open_by_fd.fh;
CODA_FREE(inp, insize);
return error;
@@ -281,11 +229,12 @@ int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
/* Venus must get null terminated string */
memcpy((char *)(inp) + offset, name, length);
*((char *)inp + offset + length) = '\0';
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
- *attrs = outp->coda_mkdir.attr;
- *newfid = outp->coda_mkdir.VFid;
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error) {
+ *attrs = outp->coda_mkdir.attr;
+ *newfid = outp->coda_mkdir.VFid;
+ }
CODA_FREE(inp, insize);
return error;
@@ -323,7 +272,7 @@ int venus_rename(struct super_block *sb, struct CodaFid *old_fid,
memcpy((char *)(inp) + offset, new_name, new_length);
*((char *)inp + offset + new_length) = '\0';
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -351,11 +300,12 @@ int venus_create(struct super_block *sb, struct CodaFid *dirfid,
/* Venus must get null terminated string */
memcpy((char *)(inp) + offset, name, length);
*((char *)inp + offset + length) = '\0';
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
- *attrs = outp->coda_create.attr;
- *newfid = outp->coda_create.VFid;
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error) {
+ *attrs = outp->coda_create.attr;
+ *newfid = outp->coda_create.VFid;
+ }
CODA_FREE(inp, insize);
return error;
@@ -377,8 +327,8 @@ int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
inp->coda_rmdir.name = offset;
memcpy((char *)(inp) + offset, name, length);
*((char *)inp + offset + length) = '\0';
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -399,8 +349,8 @@ int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
inp->coda_remove.name = offset;
memcpy((char *)(inp) + offset, name, length);
*((char *)inp + offset + length) = '\0';
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -420,19 +370,18 @@ int venus_readlink(struct super_block *sb, struct CodaFid *fid,
UPARG(CODA_READLINK);
inp->coda_readlink.VFid = *fid;
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
-
- if (! error) {
- retlen = outp->coda_readlink.count;
+
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ if (!error) {
+ retlen = outp->coda_readlink.count;
if ( retlen > *length )
- retlen = *length;
+ retlen = *length;
*length = retlen;
result = (char *)outp + (long)outp->coda_readlink.data;
memcpy(buffer, result, retlen);
*(buffer + retlen) = '\0';
}
-
+
CODA_FREE(inp, insize);
return error;
}
@@ -458,8 +407,8 @@ int venus_link(struct super_block *sb, struct CodaFid *fid,
/* make sure strings are null terminated */
memcpy((char *)(inp) + offset, name, len);
*((char *)inp + offset + len) = '\0';
-
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -494,7 +443,7 @@ int venus_symlink(struct super_block *sb, struct CodaFid *fid,
memcpy((char *)(inp) + offset, name, len);
*((char *)inp + offset + len) = '\0';
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -509,9 +458,9 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
insize=SIZE(fsync);
UPARG(CODA_FSYNC);
- inp->coda_fsync.VFid = *fid;
- error = coda_upcall(coda_sbp(sb), sizeof(union inputArgs),
- &outsize, inp);
+ inp->coda_fsync.VFid = *fid;
+ error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
+ &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -529,7 +478,7 @@ int venus_access(struct super_block *sb, struct CodaFid *fid, int mask)
inp->coda_access.VFid = *fid;
inp->coda_access.flags = mask;
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
@@ -578,9 +527,9 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
goto exit;
}
- error = coda_upcall(coda_sbp(sb), SIZE(ioctl) + data->vi.in_size,
- &outsize, inp);
-
+ error = coda_upcall(coda_vcp(sb), SIZE(ioctl) + data->vi.in_size,
+ &outsize, inp);
+
if (error) {
printk("coda_pioctl: Venus returns: %d for %s\n",
error, coda_f2s(fid));
@@ -620,16 +569,13 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
UPARG(CODA_STATFS);
- error = coda_upcall(coda_sbp(dentry->d_sb), insize, &outsize, inp);
-
- if (!error) {
+ error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp);
+ if (!error) {
sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
sfs->f_bfree = outp->coda_statfs.stat.f_bfree;
sfs->f_bavail = outp->coda_statfs.stat.f_bavail;
sfs->f_files = outp->coda_statfs.stat.f_files;
sfs->f_ffree = outp->coda_statfs.stat.f_ffree;
- } else {
- printk("coda_statfs: Venus returns: %d\n", error);
}
CODA_FREE(inp, insize);
@@ -638,96 +584,129 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
/*
* coda_upcall and coda_downcall routines.
- *
*/
+static void coda_block_signals(sigset_t *old)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ *old = current->blocked;
+
+ sigfillset(&current->blocked);
+ sigdelset(&current->blocked, SIGKILL);
+ sigdelset(&current->blocked, SIGSTOP);
+ sigdelset(&current->blocked, SIGINT);
+
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+static void coda_unblock_signals(sigset_t *old)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = *old;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/* Don't allow signals to interrupt the following upcalls before venus
+ * has seen them,
+ * has seen them:
+ * - CODA_STORE (to avoid data loss)
+ */
+#define CODA_INTERRUPTIBLE(r) (!coda_hard && \
+ (((r)->uc_opcode != CODA_CLOSE && \
+ (r)->uc_opcode != CODA_STORE && \
+ (r)->uc_opcode != CODA_RELEASE) || \
+ (r)->uc_flags & REQ_READ))
-static inline void coda_waitfor_upcall(struct upc_req *vmp,
- struct venus_comm *vcommp)
+static inline void coda_waitfor_upcall(struct upc_req *req)
{
DECLARE_WAITQUEUE(wait, current);
+ unsigned long timeout = jiffies + coda_timeout * HZ;
+ sigset_t old;
+ int blocked;
- vmp->uc_posttime = jiffies;
+ coda_block_signals(&old);
+ blocked = 1;
- add_wait_queue(&vmp->uc_sleep, &wait);
+ add_wait_queue(&req->uc_sleep, &wait);
for (;;) {
- if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE )
+ if (CODA_INTERRUPTIBLE(req))
set_current_state(TASK_INTERRUPTIBLE);
else
set_current_state(TASK_UNINTERRUPTIBLE);
- /* venus died */
- if ( !vcommp->vc_inuse )
- break;
-
/* got a reply */
- if ( vmp->uc_flags & ( REQ_WRITE | REQ_ABORT ) )
+ if (req->uc_flags & (REQ_WRITE | REQ_ABORT))
break;
- if ( !coda_hard && vmp->uc_opcode != CODA_CLOSE && signal_pending(current) ) {
- /* if this process really wants to die, let it go */
- if ( sigismember(&(current->pending.signal), SIGKILL) ||
- sigismember(&(current->pending.signal), SIGINT) )
- break;
- /* signal is present: after timeout always return
- really smart idea, probably useless ... */
- if ( jiffies - vmp->uc_posttime > coda_timeout * HZ )
- break;
+ if (blocked && time_after(jiffies, timeout) &&
+ CODA_INTERRUPTIBLE(req))
+ {
+ coda_unblock_signals(&old);
+ blocked = 0;
+ }
+
+ if (signal_pending(current)) {
+ list_del(&req->uc_chain);
+ break;
}
- schedule();
+
+ if (blocked)
+ schedule_timeout(HZ);
+ else
+ schedule();
}
- remove_wait_queue(&vmp->uc_sleep, &wait);
- set_current_state(TASK_RUNNING);
+ if (blocked)
+ coda_unblock_signals(&old);
- return;
+ remove_wait_queue(&req->uc_sleep, &wait);
+ set_current_state(TASK_RUNNING);
}
-/*
- * coda_upcall will return an error in the case of
+/*
+ * coda_upcall will return an error in the case of
* failed communication with Venus _or_ will peek at Venus
* reply and return Venus' error.
*
* As venus has 2 types of errors, normal errors (positive) and internal
* errors (negative), normal errors are negated, while internal errors
* are all mapped to -EINTR, while showing a nice warning message. (jh)
- *
*/
-static int coda_upcall(struct coda_sb_info *sbi,
- int inSize, int *outSize,
- union inputArgs *buffer)
+static int coda_upcall(struct venus_comm *vcp,
+ int inSize, int *outSize,
+ union inputArgs *buffer)
{
- struct venus_comm *vcommp;
union outputArgs *out;
- struct upc_req *req;
+ union inputArgs *sig_inputArgs;
+ struct upc_req *req, *sig_req;
int error = 0;
- vcommp = sbi->sbi_vcomm;
- if ( !vcommp->vc_inuse ) {
- printk("No pseudo device in upcall comms at %p\n", vcommp);
- return -ENXIO;
+ if (!vcp->vc_inuse) {
+ printk(KERN_NOTICE "coda: Venus dead, not sending upcall\n");
+ return -ENXIO;
}
/* Format the request message. */
- req = upc_alloc();
- if (!req) {
- printk("Failed to allocate upc_req structure\n");
+ req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- }
+
req->uc_data = (void *)buffer;
req->uc_flags = 0;
req->uc_inSize = inSize;
req->uc_outSize = *outSize ? *outSize : inSize;
req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode;
- req->uc_unique = ++vcommp->vc_seq;
+ req->uc_unique = ++vcp->vc_seq;
init_waitqueue_head(&req->uc_sleep);
-
+
/* Fill in the common input args. */
((union inputArgs *)buffer)->ih.unique = req->uc_unique;
/* Append msg to pending queue and poke Venus. */
- list_add_tail(&(req->uc_chain), &vcommp->vc_pending);
-
- wake_up_interruptible(&vcommp->vc_waitq);
+ list_add_tail(&req->uc_chain, &vcp->vc_pending);
+
+ wake_up_interruptible(&vcp->vc_waitq);
/* We can be interrupted while we wait for Venus to process
* our request. If the interrupt occurs before Venus has read
* the request, we dequeue and return. If it occurs after the
@@ -738,67 +717,60 @@ static int coda_upcall(struct coda_sb_info *sbi,
* ENODEV. */
/* Go to sleep. Wake up on signals only after the timeout. */
- coda_waitfor_upcall(req, vcommp);
+ coda_waitfor_upcall(req);
- if (vcommp->vc_inuse) { /* i.e. Venus is still alive */
- /* Op went through, interrupt or not... */
- if (req->uc_flags & REQ_WRITE) {
+ /* Op went through, interrupt or not... */
+ if (req->uc_flags & REQ_WRITE) {
out = (union outputArgs *)req->uc_data;
/* here we map positive Venus errors to kernel errors */
error = -out->oh.result;
*outSize = req->uc_outSize;
goto exit;
- }
- if ( !(req->uc_flags & REQ_READ) && signal_pending(current)) {
- /* Interrupted before venus read it. */
- list_del(&(req->uc_chain));
- /* perhaps the best way to convince the app to
- give up? */
- error = -EINTR;
+ }
+
+ error = -EINTR;
+ if ((req->uc_flags & REQ_ABORT) || !signal_pending(current)) {
+ printk(KERN_WARNING "coda: Unexpected interruption.\n");
goto exit;
- }
- if ( (req->uc_flags & REQ_READ) && signal_pending(current) ) {
- /* interrupted after Venus did its read, send signal */
- union inputArgs *sig_inputArgs;
- struct upc_req *sig_req;
-
- list_del(&(req->uc_chain));
- error = -ENOMEM;
- sig_req = upc_alloc();
- if (!sig_req) goto exit;
-
- CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
- if (!sig_req->uc_data) {
- upc_free(sig_req);
- goto exit;
- }
-
- error = -EINTR;
- sig_inputArgs = (union inputArgs *)sig_req->uc_data;
- sig_inputArgs->ih.opcode = CODA_SIGNAL;
- sig_inputArgs->ih.unique = req->uc_unique;
-
- sig_req->uc_flags = REQ_ASYNC;
- sig_req->uc_opcode = sig_inputArgs->ih.opcode;
- sig_req->uc_unique = sig_inputArgs->ih.unique;
- sig_req->uc_inSize = sizeof(struct coda_in_hdr);
- sig_req->uc_outSize = sizeof(struct coda_in_hdr);
-
- /* insert at head of queue! */
- list_add(&(sig_req->uc_chain), &vcommp->vc_pending);
- wake_up_interruptible(&vcommp->vc_waitq);
- } else {
- printk("Coda: Strange interruption..\n");
- error = -EINTR;
- }
- } else { /* If venus died i.e. !VC_OPEN(vcommp) */
- printk("coda_upcall: Venus dead on (op,un) (%d.%d) flags %d\n",
- req->uc_opcode, req->uc_unique, req->uc_flags);
- error = -ENODEV;
}
- exit:
- upc_free(req);
+ /* Interrupted before venus read it. */
+ if (!(req->uc_flags & REQ_READ))
+ goto exit;
+
+ /* Venus saw the upcall, make sure we can send interrupt signal */
+ if (!vcp->vc_inuse) {
+ printk(KERN_INFO "coda: Venus dead, not sending signal.\n");
+ goto exit;
+ }
+
+ error = -ENOMEM;
+ sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+ if (!sig_req) goto exit;
+
+ CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
+ if (!sig_req->uc_data) {
+ kfree(sig_req);
+ goto exit;
+ }
+
+ error = -EINTR;
+ sig_inputArgs = (union inputArgs *)sig_req->uc_data;
+ sig_inputArgs->ih.opcode = CODA_SIGNAL;
+ sig_inputArgs->ih.unique = req->uc_unique;
+
+ sig_req->uc_flags = REQ_ASYNC;
+ sig_req->uc_opcode = sig_inputArgs->ih.opcode;
+ sig_req->uc_unique = sig_inputArgs->ih.unique;
+ sig_req->uc_inSize = sizeof(struct coda_in_hdr);
+ sig_req->uc_outSize = sizeof(struct coda_in_hdr);
+
+ /* insert at head of queue! */
+ list_add(&(sig_req->uc_chain), &vcp->vc_pending);
+ wake_up_interruptible(&vcp->vc_waitq);
+
+exit:
+ kfree(req);
return error;
}
@@ -838,77 +810,66 @@ static int coda_upcall(struct coda_sb_info *sbi,
int coda_downcall(int opcode, union outputArgs * out, struct super_block *sb)
{
+ struct inode *inode = NULL;
+ struct CodaFid *fid, *newfid;
+
/* Handle invalidation requests. */
- if ( !sb || !sb->s_root || !sb->s_root->d_inode)
- return 0;
-
- switch (opcode) {
-
- case CODA_FLUSH : {
- coda_cache_clear_all(sb);
- shrink_dcache_sb(sb);
- coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
- return(0);
- }
-
- case CODA_PURGEUSER : {
- coda_cache_clear_all(sb);
- return(0);
- }
-
- case CODA_ZAPDIR : {
- struct inode *inode;
- struct CodaFid *fid = &out->coda_zapdir.CodaFid;
-
- inode = coda_fid_to_inode(fid, sb);
- if (inode) {
- coda_flag_inode_children(inode, C_PURGE);
- coda_flag_inode(inode, C_VATTR);
- iput(inode);
- }
-
- return(0);
- }
-
- case CODA_ZAPFILE : {
- struct inode *inode;
- struct CodaFid *fid = &out->coda_zapfile.CodaFid;
- inode = coda_fid_to_inode(fid, sb);
- if ( inode ) {
- coda_flag_inode(inode, C_VATTR);
- iput(inode);
- }
- return 0;
- }
-
- case CODA_PURGEFID : {
- struct inode *inode;
- struct CodaFid *fid = &out->coda_purgefid.CodaFid;
- inode = coda_fid_to_inode(fid, sb);
- if ( inode ) {
+ if ( !sb || !sb->s_root)
+ return 0;
+
+ switch (opcode) {
+ case CODA_FLUSH:
+ coda_cache_clear_all(sb);
+ shrink_dcache_sb(sb);
+ if (sb->s_root->d_inode)
+ coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
+ break;
+
+ case CODA_PURGEUSER:
+ coda_cache_clear_all(sb);
+ break;
+
+ case CODA_ZAPDIR:
+ fid = &out->coda_zapdir.CodaFid;
+ inode = coda_fid_to_inode(fid, sb);
+ if (inode) {
+ coda_flag_inode_children(inode, C_PURGE);
+ coda_flag_inode(inode, C_VATTR);
+ }
+ break;
+
+ case CODA_ZAPFILE:
+ fid = &out->coda_zapfile.CodaFid;
+ inode = coda_fid_to_inode(fid, sb);
+ if (inode)
+ coda_flag_inode(inode, C_VATTR);
+ break;
+
+ case CODA_PURGEFID:
+ fid = &out->coda_purgefid.CodaFid;
+ inode = coda_fid_to_inode(fid, sb);
+ if (inode) {
coda_flag_inode_children(inode, C_PURGE);
/* catch the dentries later if some are still busy */
coda_flag_inode(inode, C_PURGE);
d_prune_aliases(inode);
- iput(inode);
- }
- return 0;
- }
-
- case CODA_REPLACE : {
- struct inode *inode;
- struct CodaFid *oldfid = &out->coda_replace.OldFid;
- struct CodaFid *newfid = &out->coda_replace.NewFid;
- inode = coda_fid_to_inode(oldfid, sb);
- if ( inode ) {
- coda_replace_fid(inode, oldfid, newfid);
- iput(inode);
- }
- return 0;
- }
- }
- return 0;
+ }
+ break;
+
+ case CODA_REPLACE:
+ fid = &out->coda_replace.OldFid;
+ newfid = &out->coda_replace.NewFid;
+ inode = coda_fid_to_inode(fid, sb);
+ if (inode)
+ coda_replace_fid(inode, fid, newfid);
+ break;
+ }
+
+ if (inode)
+ iput(inode);
+
+ return 0;
}
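
Illustration only, not part of the patch: coda_block_signals()/coda_unblock_signals() above mask everything except the signals that must still be able to interrupt the upcall wait. The sketch below is a userspace analogue of the same pattern using sigprocmask(); the kernel code instead edits current->blocked under sighand->siglock and calls recalc_sigpending().

#include <signal.h>

static void block_most_signals(sigset_t *old)
{
	sigset_t mask;

	sigfillset(&mask);
	sigdelset(&mask, SIGKILL);	/* cannot be blocked anyway */
	sigdelset(&mask, SIGSTOP);	/* cannot be blocked anyway */
	sigdelset(&mask, SIGINT);	/* keep ^C working during the wait */
	sigprocmask(SIG_SETMASK, &mask, old);
}

static void restore_signals(const sigset_t *old)
{
	sigprocmask(SIG_SETMASK, old, NULL);
}
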
diff --git a/fs/compat.c b/fs/compat.c
index 4db6216..15078ce 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1257,6 +1257,7 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
+ unsigned long kpos = 0;
int ret;
while (argc-- > 0) {
@@ -1265,92 +1266,84 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
unsigned long pos;
if (get_user(str, argv+argc) ||
- !(len = strnlen_user(compat_ptr(str), bprm->p))) {
+ !(len = strnlen_user(compat_ptr(str), MAX_ARG_STRLEN))) {
ret = -EFAULT;
goto out;
}
- if (bprm->p < len) {
+ if (len > MAX_ARG_STRLEN) {
ret = -E2BIG;
goto out;
}
- bprm->p -= len;
- /* XXX: add architecture specific overflow check here. */
+		/* We're going to work our way backwards. */
pos = bprm->p;
+ str += len;
+ bprm->p -= len;
while (len > 0) {
- int i, new, err;
int offset, bytes_to_copy;
- struct page *page;
offset = pos % PAGE_SIZE;
- i = pos/PAGE_SIZE;
- page = bprm->page[i];
- new = 0;
- if (!page) {
- page = alloc_page(GFP_HIGHUSER);
- bprm->page[i] = page;
- if (!page) {
- ret = -ENOMEM;
+ if (offset == 0)
+ offset = PAGE_SIZE;
+
+ bytes_to_copy = offset;
+ if (bytes_to_copy > len)
+ bytes_to_copy = len;
+
+ offset -= bytes_to_copy;
+ pos -= bytes_to_copy;
+ str -= bytes_to_copy;
+ len -= bytes_to_copy;
+
+ if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+ struct page *page;
+
+#ifdef CONFIG_STACK_GROWSUP
+ ret = expand_stack_downwards(bprm->vma, pos);
+ if (ret < 0) {
+					/* We've exceeded the stack rlimit. */
+ ret = -E2BIG;
+ goto out;
+ }
+#endif
+ ret = get_user_pages(current, bprm->mm, pos,
+ 1, 1, 1, &page, NULL);
+ if (ret <= 0) {
+					/* We've exceeded the stack rlimit. */
+ ret = -E2BIG;
goto out;
}
- new = 1;
- }
- if (page != kmapped_page) {
- if (kmapped_page)
+ if (kmapped_page) {
+ flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
+ put_page(kmapped_page);
+ }
kmapped_page = page;
kaddr = kmap(kmapped_page);
+ kpos = pos & PAGE_MASK;
+ flush_cache_page(bprm->vma, kpos,
+ page_to_pfn(kmapped_page));
}
- if (new && offset)
- memset(kaddr, 0, offset);
- bytes_to_copy = PAGE_SIZE - offset;
- if (bytes_to_copy > len) {
- bytes_to_copy = len;
- if (new)
- memset(kaddr+offset+len, 0,
- PAGE_SIZE-offset-len);
- }
- err = copy_from_user(kaddr+offset, compat_ptr(str),
- bytes_to_copy);
- if (err) {
+ if (copy_from_user(kaddr+offset, compat_ptr(str),
+ bytes_to_copy)) {
ret = -EFAULT;
goto out;
}
-
- pos += bytes_to_copy;
- str += bytes_to_copy;
- len -= bytes_to_copy;
}
}
ret = 0;
out:
- if (kmapped_page)
+ if (kmapped_page) {
+ flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
- return ret;
-}
-
-#ifdef CONFIG_MMU
-
-#define free_arg_pages(bprm) do { } while (0)
-
-#else
-
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
- int i;
-
- for (i = 0; i < MAX_ARG_PAGES; i++) {
- if (bprm->page[i])
- __free_page(bprm->page[i]);
- bprm->page[i] = NULL;
+ put_page(kmapped_page);
}
+ return ret;
}
-#endif /* CONFIG_MMU */
-
/*
* compat_do_execve() is mostly a copy of do_execve(), with the exception
* that it processes 32 bit argv and envp pointers.
@@ -1363,7 +1356,6 @@ int compat_do_execve(char * filename,
struct linux_binprm *bprm;
struct file *file;
int retval;
- int i;
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1377,24 +1369,19 @@ int compat_do_execve(char * filename,
sched_exec();
- bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
bprm->file = file;
bprm->filename = filename;
bprm->interp = filename;
- bprm->mm = mm_alloc();
- retval = -ENOMEM;
- if (!bprm->mm)
- goto out_file;
- retval = init_new_context(current, bprm->mm);
- if (retval < 0)
- goto out_mm;
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
- bprm->argc = compat_count(argv, bprm->p / sizeof(compat_uptr_t));
+ bprm->argc = compat_count(argv, MAX_ARG_STRINGS);
if ((retval = bprm->argc) < 0)
goto out_mm;
- bprm->envc = compat_count(envp, bprm->p / sizeof(compat_uptr_t));
+ bprm->envc = compat_count(envp, MAX_ARG_STRINGS);
if ((retval = bprm->envc) < 0)
goto out_mm;
@@ -1421,8 +1408,6 @@ int compat_do_execve(char * filename,
retval = search_binary_handler(bprm, regs);
if (retval >= 0) {
- free_arg_pages(bprm);
-
/* execve success */
security_bprm_free(bprm);
acct_update_integrals(current);
@@ -1431,19 +1416,12 @@ int compat_do_execve(char * filename,
}
out:
- /* Something went wrong, return the inode and free the argument pages*/
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- struct page * page = bprm->page[i];
- if (page)
- __free_page(page);
- }
-
if (bprm->security)
security_bprm_free(bprm);
out_mm:
if (bprm->mm)
- mmdrop(bprm->mm);
+ mmput(bprm->mm);
out_file:
if (bprm->file) {
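
Illustration only, not part of the patch: the new compat_copy_strings() copies each argument string backwards, never letting a single copy cross a page boundary, so that at most one stack page has to be mapped at a time. The standalone sketch below mirrors that chunking arithmetic with plain memcpy(); 'dst' and 'top' are stand-ins and no kernel APIs are involved.

#include <string.h>

#define PAGE_SIZE 4096UL

static void copy_backwards(char *dst, unsigned long top,
			   const char *src, unsigned long len)
{
	unsigned long pos = top;	/* one past the last destination byte */

	src += len;			/* walk the source backwards as well */
	while (len > 0) {
		unsigned long offset = pos % PAGE_SIZE;
		unsigned long bytes = offset ? offset : PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		pos -= bytes;
		src -= bytes;
		len -= bytes;
		memcpy(dst + pos, src, bytes);	/* never crosses a page boundary */
	}
}
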
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index e440a7b..9c3fd07 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -21,7 +21,6 @@
#include <linux/if.h>
#include <linux/if_bridge.h>
#include <linux/slab.h>
-#include <linux/hdreg.h>
#include <linux/raid/md.h>
#include <linux/kd.h>
#include <linux/dirent.h>
@@ -33,12 +32,10 @@
#include <linux/vt.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/fd.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
-#include <linux/cdrom.h>
#include <linux/auto_fs.h>
#include <linux/auto_fs4.h>
#include <linux/tty.h>
@@ -48,7 +45,6 @@
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/smb_fs.h>
-#include <linux/blkpg.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rtc.h>
@@ -62,7 +58,6 @@
#include <linux/i2c-dev.h>
#include <linux/wireless.h>
#include <linux/atalk.h>
-#include <linux/blktrace_api.h>
#include <linux/loop.h>
#include <net/bluetooth/bluetooth.h>
@@ -115,6 +110,10 @@
#include <linux/dvb/video.h>
#include <linux/lp.h>
+#ifdef CONFIG_SPARC
+#include <asm/fbio.h>
+#endif
+
static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
unsigned long arg, struct file *f)
{
@@ -320,22 +319,21 @@ struct ifconf32 {
static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct net_device *dev;
- struct ifreq32 ifr32;
+ struct ifreq __user *uifr;
int err;
- if (copy_from_user(&ifr32, compat_ptr(arg), sizeof(ifr32)))
+ uifr = compat_alloc_user_space(sizeof(struct ifreq));
+	if (copy_in_user(uifr, compat_ptr(arg), sizeof(struct ifreq32)))
return -EFAULT;
- dev = dev_get_by_index(ifr32.ifr_ifindex);
- if (!dev)
- return -ENODEV;
+ err = sys_ioctl(fd, SIOCGIFNAME, (unsigned long)uifr);
+ if (err)
+ return err;
- strlcpy(ifr32.ifr_name, dev->name, sizeof(ifr32.ifr_name));
- dev_put(dev);
-
- err = copy_to_user(compat_ptr(arg), &ifr32, sizeof(ifr32));
- return (err ? -EFAULT : 0);
+ if (copy_in_user(compat_ptr(arg), uifr, sizeof(struct ifreq32)))
+ return -EFAULT;
+
+ return 0;
}
static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
@@ -664,53 +662,6 @@ out:
#endif
#ifdef CONFIG_BLOCK
-struct hd_geometry32 {
- unsigned char heads;
- unsigned char sectors;
- unsigned short cylinders;
- u32 start;
-};
-
-static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs = get_fs();
- struct hd_geometry geo;
- struct hd_geometry32 __user *ugeo;
- int err;
-
- set_fs (KERNEL_DS);
- err = sys_ioctl(fd, HDIO_GETGEO, (unsigned long)&geo);
- set_fs (old_fs);
- ugeo = compat_ptr(arg);
- if (!err) {
- err = copy_to_user (ugeo, &geo, 4);
- err |= __put_user (geo.start, &ugeo->start);
- if (err)
- err = -EFAULT;
- }
- return err;
-}
-
-static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs = get_fs();
- unsigned long kval;
- unsigned int __user *uvp;
- int error;
-
- set_fs(KERNEL_DS);
- error = sys_ioctl(fd, cmd, (long)&kval);
- set_fs(old_fs);
-
- if(error == 0) {
- uvp = compat_ptr(arg);
- if(put_user(kval, uvp))
- error = -EFAULT;
- }
- return error;
-}
-
-
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
compat_int_t dxfer_direction; /* [i] data transfer direction */
@@ -1085,108 +1036,6 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
return err ? -EFAULT: 0;
}
-struct cdrom_read_audio32 {
- union cdrom_addr addr;
- u8 addr_format;
- compat_int_t nframes;
- compat_caddr_t buf;
-};
-
-struct cdrom_generic_command32 {
- unsigned char cmd[CDROM_PACKET_SIZE];
- compat_caddr_t buffer;
- compat_uint_t buflen;
- compat_int_t stat;
- compat_caddr_t sense;
- unsigned char data_direction;
- compat_int_t quiet;
- compat_int_t timeout;
- compat_caddr_t reserved[1];
-};
-
-static int cdrom_do_read_audio(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct cdrom_read_audio __user *cdread_audio;
- struct cdrom_read_audio32 __user *cdread_audio32;
- __u32 data;
- void __user *datap;
-
- cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
- cdread_audio32 = compat_ptr(arg);
-
- if (copy_in_user(&cdread_audio->addr,
- &cdread_audio32->addr,
- (sizeof(*cdread_audio32) -
- sizeof(compat_caddr_t))))
- return -EFAULT;
-
- if (get_user(data, &cdread_audio32->buf))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(datap, &cdread_audio->buf))
- return -EFAULT;
-
- return sys_ioctl(fd, cmd, (unsigned long) cdread_audio);
-}
-
-static int cdrom_do_generic_command(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct cdrom_generic_command __user *cgc;
- struct cdrom_generic_command32 __user *cgc32;
- u32 data;
- unsigned char dir;
- int itmp;
-
- cgc = compat_alloc_user_space(sizeof(*cgc));
- cgc32 = compat_ptr(arg);
-
- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
- get_user(data, &cgc32->buffer) ||
- put_user(compat_ptr(data), &cgc->buffer) ||
- copy_in_user(&cgc->buflen, &cgc32->buflen,
- (sizeof(unsigned int) + sizeof(int))) ||
- get_user(data, &cgc32->sense) ||
- put_user(compat_ptr(data), &cgc->sense) ||
- get_user(dir, &cgc32->data_direction) ||
- put_user(dir, &cgc->data_direction) ||
- get_user(itmp, &cgc32->quiet) ||
- put_user(itmp, &cgc->quiet) ||
- get_user(itmp, &cgc32->timeout) ||
- put_user(itmp, &cgc->timeout) ||
- get_user(data, &cgc32->reserved[0]) ||
- put_user(compat_ptr(data), &cgc->reserved[0]))
- return -EFAULT;
-
- return sys_ioctl(fd, cmd, (unsigned long) cgc);
-}
-
-static int cdrom_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- int err;
-
- switch(cmd) {
- case CDROMREADAUDIO:
- err = cdrom_do_read_audio(fd, cmd, arg);
- break;
-
- case CDROM_SEND_PACKET:
- err = cdrom_do_generic_command(fd, cmd, arg);
- break;
-
- default:
- do {
- static int count;
- if (++count <= 20)
- printk("cdrom_ioctl: Unknown cmd fd(%d) "
- "cmd(%08x) arg(%08x)\n",
- (int)fd, (unsigned int)cmd, (unsigned int)arg);
- } while(0);
- err = -EINVAL;
- break;
- };
-
- return err;
-}
#endif /* CONFIG_BLOCK */
#ifdef CONFIG_VT
@@ -1532,71 +1381,11 @@ ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
-#ifdef CONFIG_BLOCK
-static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- /* The mkswap binary hard codes it to Intel value :-((( */
- return w_long(fd, BLKGETSIZE, arg);
-}
-
-struct blkpg_ioctl_arg32 {
- compat_int_t op;
- compat_int_t flags;
- compat_int_t datalen;
- compat_caddr_t data;
-};
-
-static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct blkpg_ioctl_arg32 __user *ua32 = compat_ptr(arg);
- struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
- compat_caddr_t udata;
- compat_int_t n;
- int err;
-
- err = get_user(n, &ua32->op);
- err |= put_user(n, &a->op);
- err |= get_user(n, &ua32->flags);
- err |= put_user(n, &a->flags);
- err |= get_user(n, &ua32->datalen);
- err |= put_user(n, &a->datalen);
- err |= get_user(udata, &ua32->data);
- err |= put_user(compat_ptr(udata), &a->data);
- if (err)
- return err;
-
- return sys_ioctl(fd, cmd, (unsigned long)a);
-}
-#endif
-
static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
}
-#ifdef CONFIG_BLOCK
-/* Fix sizeof(sizeof()) breakage */
-#define BLKBSZGET_32 _IOR(0x12,112,int)
-#define BLKBSZSET_32 _IOW(0x12,113,int)
-#define BLKGETSIZE64_32 _IOR(0x12,114,int)
-
-static int do_blkbszget(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- return sys_ioctl(fd, BLKBSZGET, (unsigned long)compat_ptr(arg));
-}
-
-static int do_blkbszset(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- return sys_ioctl(fd, BLKBSZSET, (unsigned long)compat_ptr(arg));
-}
-
-static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
- unsigned long arg)
-{
- return sys_ioctl(fd, BLKGETSIZE64, (unsigned long)compat_ptr(arg));
-}
-#endif
-
/* Bluetooth ioctls */
#define HCIUARTSETPROTO _IOW('U', 200, int)
#define HCIUARTGETPROTO _IOR('U', 201, int)
@@ -1616,333 +1405,6 @@ static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
-#ifdef CONFIG_BLOCK
-struct floppy_struct32 {
- compat_uint_t size;
- compat_uint_t sect;
- compat_uint_t head;
- compat_uint_t track;
- compat_uint_t stretch;
- unsigned char gap;
- unsigned char rate;
- unsigned char spec1;
- unsigned char fmt_gap;
- const compat_caddr_t name;
-};
-
-struct floppy_drive_params32 {
- char cmos;
- compat_ulong_t max_dtr;
- compat_ulong_t hlt;
- compat_ulong_t hut;
- compat_ulong_t srt;
- compat_ulong_t spinup;
- compat_ulong_t spindown;
- unsigned char spindown_offset;
- unsigned char select_delay;
- unsigned char rps;
- unsigned char tracks;
- compat_ulong_t timeout;
- unsigned char interleave_sect;
- struct floppy_max_errors max_errors;
- char flags;
- char read_track;
- short autodetect[8];
- compat_int_t checkfreq;
- compat_int_t native_format;
-};
-
-struct floppy_drive_struct32 {
- signed char flags;
- compat_ulong_t spinup_date;
- compat_ulong_t select_date;
- compat_ulong_t first_read_date;
- short probed_format;
- short track;
- short maxblock;
- short maxtrack;
- compat_int_t generation;
- compat_int_t keep_data;
- compat_int_t fd_ref;
- compat_int_t fd_device;
- compat_int_t last_checked;
- compat_caddr_t dmabuf;
- compat_int_t bufblocks;
-};
-
-struct floppy_fdc_state32 {
- compat_int_t spec1;
- compat_int_t spec2;
- compat_int_t dtr;
- unsigned char version;
- unsigned char dor;
- compat_ulong_t address;
- unsigned int rawcmd:2;
- unsigned int reset:1;
- unsigned int need_configure:1;
- unsigned int perp_mode:2;
- unsigned int has_fifo:1;
- unsigned int driver_version;
- unsigned char track[4];
-};
-
-struct floppy_write_errors32 {
- unsigned int write_errors;
- compat_ulong_t first_error_sector;
- compat_int_t first_error_generation;
- compat_ulong_t last_error_sector;
- compat_int_t last_error_generation;
- compat_uint_t badness;
-};
-
-#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
-#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
-#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
-#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
-#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
-#define FDGETDRVSTAT32 _IOR(2, 0x12, struct floppy_drive_struct32)
-#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct floppy_drive_struct32)
-#define FDGETFDCSTAT32 _IOR(2, 0x15, struct floppy_fdc_state32)
-#define FDWERRORGET32 _IOR(2, 0x17, struct floppy_write_errors32)
-
-static struct {
- unsigned int cmd32;
- unsigned int cmd;
-} fd_ioctl_trans_table[] = {
- { FDSETPRM32, FDSETPRM },
- { FDDEFPRM32, FDDEFPRM },
- { FDGETPRM32, FDGETPRM },
- { FDSETDRVPRM32, FDSETDRVPRM },
- { FDGETDRVPRM32, FDGETDRVPRM },
- { FDGETDRVSTAT32, FDGETDRVSTAT },
- { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
- { FDGETFDCSTAT32, FDGETFDCSTAT },
- { FDWERRORGET32, FDWERRORGET }
-};
-
-#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
-
-static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs = get_fs();
- void *karg = NULL;
- unsigned int kcmd = 0;
- int i, err;
-
- for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
- if (cmd == fd_ioctl_trans_table[i].cmd32) {
- kcmd = fd_ioctl_trans_table[i].cmd;
- break;
- }
- if (!kcmd)
- return -EINVAL;
-
- switch (cmd) {
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- {
- compat_uptr_t name;
- struct floppy_struct32 __user *uf;
- struct floppy_struct *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETPRM32)
- break;
- err = __get_user(f->size, &uf->size);
- err |= __get_user(f->sect, &uf->sect);
- err |= __get_user(f->head, &uf->head);
- err |= __get_user(f->track, &uf->track);
- err |= __get_user(f->stretch, &uf->stretch);
- err |= __get_user(f->gap, &uf->gap);
- err |= __get_user(f->rate, &uf->rate);
- err |= __get_user(f->spec1, &uf->spec1);
- err |= __get_user(f->fmt_gap, &uf->fmt_gap);
- err |= __get_user(name, &uf->name);
- f->name = compat_ptr(name);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- {
- struct floppy_drive_params32 __user *uf;
- struct floppy_drive_params *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETDRVPRM32)
- break;
- err = __get_user(f->cmos, &uf->cmos);
- err |= __get_user(f->max_dtr, &uf->max_dtr);
- err |= __get_user(f->hlt, &uf->hlt);
- err |= __get_user(f->hut, &uf->hut);
- err |= __get_user(f->srt, &uf->srt);
- err |= __get_user(f->spinup, &uf->spinup);
- err |= __get_user(f->spindown, &uf->spindown);
- err |= __get_user(f->spindown_offset, &uf->spindown_offset);
- err |= __get_user(f->select_delay, &uf->select_delay);
- err |= __get_user(f->rps, &uf->rps);
- err |= __get_user(f->tracks, &uf->tracks);
- err |= __get_user(f->timeout, &uf->timeout);
- err |= __get_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
- err |= __get_user(f->flags, &uf->flags);
- err |= __get_user(f->read_track, &uf->read_track);
- err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
- err |= __get_user(f->checkfreq, &uf->checkfreq);
- err |= __get_user(f->native_format, &uf->native_format);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDGETFDCSTAT32:
- karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDWERRORGET32:
- karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- default:
- return -EINVAL;
- }
- set_fs (KERNEL_DS);
- err = sys_ioctl (fd, kcmd, (unsigned long)karg);
- set_fs (old_fs);
- if (err)
- goto out;
- switch (cmd) {
- case FDGETPRM32:
- {
- struct floppy_struct *f = karg;
- struct floppy_struct32 __user *uf = compat_ptr(arg);
-
- err = __put_user(f->size, &uf->size);
- err |= __put_user(f->sect, &uf->sect);
- err |= __put_user(f->head, &uf->head);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->stretch, &uf->stretch);
- err |= __put_user(f->gap, &uf->gap);
- err |= __put_user(f->rate, &uf->rate);
- err |= __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->fmt_gap, &uf->fmt_gap);
- err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
- break;
- }
- case FDGETDRVPRM32:
- {
- struct floppy_drive_params32 __user *uf;
- struct floppy_drive_params *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->cmos, &uf->cmos);
- err |= __put_user(f->max_dtr, &uf->max_dtr);
- err |= __put_user(f->hlt, &uf->hlt);
- err |= __put_user(f->hut, &uf->hut);
- err |= __put_user(f->srt, &uf->srt);
- err |= __put_user(f->spinup, &uf->spinup);
- err |= __put_user(f->spindown, &uf->spindown);
- err |= __put_user(f->spindown_offset, &uf->spindown_offset);
- err |= __put_user(f->select_delay, &uf->select_delay);
- err |= __put_user(f->rps, &uf->rps);
- err |= __put_user(f->tracks, &uf->tracks);
- err |= __put_user(f->timeout, &uf->timeout);
- err |= __put_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
- err |= __put_user(f->flags, &uf->flags);
- err |= __put_user(f->read_track, &uf->read_track);
- err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
- err |= __put_user(f->checkfreq, &uf->checkfreq);
- err |= __put_user(f->native_format, &uf->native_format);
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- {
- struct floppy_drive_struct32 __user *uf;
- struct floppy_drive_struct *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->flags, &uf->flags);
- err |= __put_user(f->spinup_date, &uf->spinup_date);
- err |= __put_user(f->select_date, &uf->select_date);
- err |= __put_user(f->first_read_date, &uf->first_read_date);
- err |= __put_user(f->probed_format, &uf->probed_format);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->maxblock, &uf->maxblock);
- err |= __put_user(f->maxtrack, &uf->maxtrack);
- err |= __put_user(f->generation, &uf->generation);
- err |= __put_user(f->keep_data, &uf->keep_data);
- err |= __put_user(f->fd_ref, &uf->fd_ref);
- err |= __put_user(f->fd_device, &uf->fd_device);
- err |= __put_user(f->last_checked, &uf->last_checked);
- err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
- err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
- break;
- }
- case FDGETFDCSTAT32:
- {
- struct floppy_fdc_state32 __user *uf;
- struct floppy_fdc_state *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->spec2, &uf->spec2);
- err |= __put_user(f->dtr, &uf->dtr);
- err |= __put_user(f->version, &uf->version);
- err |= __put_user(f->dor, &uf->dor);
- err |= __put_user(f->address, &uf->address);
- err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
- (char *)&f->address + sizeof(f->address), sizeof(int));
- err |= __put_user(f->driver_version, &uf->driver_version);
- err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
- break;
- }
- case FDWERRORGET32:
- {
- struct floppy_write_errors32 __user *uf;
- struct floppy_write_errors *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->write_errors, &uf->write_errors);
- err |= __put_user(f->first_error_sector, &uf->first_error_sector);
- err |= __put_user(f->first_error_generation, &uf->first_error_generation);
- err |= __put_user(f->last_error_sector, &uf->last_error_sector);
- err |= __put_user(f->last_error_generation, &uf->last_error_generation);
- err |= __put_user(f->badness, &uf->badness);
- break;
- }
- default:
- break;
- }
- if (err)
- err = -EFAULT;
-
-out:
- kfree(karg);
- return err;
-}
-#endif
-
struct mtd_oob_buf32 {
u_int32_t start;
u_int32_t length;
@@ -2307,8 +1769,10 @@ static int do_wireless_ioctl(unsigned int fd, unsigned int cmd, unsigned long ar
struct iwreq __user *iwr_u;
struct iw_point __user *iwp;
struct compat_iw_point __user *iwp_u;
- compat_caddr_t pointer;
+ compat_caddr_t pointer_u;
+ void __user *pointer;
__u16 length, flags;
+ int ret;
iwr_u = compat_ptr(arg);
iwp_u = (struct compat_iw_point __user *) &iwr_u->u.data;
@@ -2326,17 +1790,29 @@ static int do_wireless_ioctl(unsigned int fd, unsigned int cmd, unsigned long ar
sizeof(iwr->ifr_ifrn.ifrn_name)))
return -EFAULT;
- if (__get_user(pointer, &iwp_u->pointer) ||
+ if (__get_user(pointer_u, &iwp_u->pointer) ||
__get_user(length, &iwp_u->length) ||
__get_user(flags, &iwp_u->flags))
return -EFAULT;
- if (__put_user(compat_ptr(pointer), &iwp->pointer) ||
+ if (__put_user(compat_ptr(pointer_u), &iwp->pointer) ||
__put_user(length, &iwp->length) ||
__put_user(flags, &iwp->flags))
return -EFAULT;
- return sys_ioctl(fd, cmd, (unsigned long) iwr);
+ ret = sys_ioctl(fd, cmd, (unsigned long) iwr);
+
+ if (__get_user(pointer, &iwp->pointer) ||
+ __get_user(length, &iwp->length) ||
+ __get_user(flags, &iwp->flags))
+ return -EFAULT;
+
+ if (__put_user(ptr_to_compat(pointer), &iwp_u->pointer) ||
+ __put_user(length, &iwp_u->length) ||
+ __put_user(flags, &iwp_u->flags))
+ return -EFAULT;
+
+ return ret;
}
/* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
@@ -2488,60 +1964,6 @@ COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
/* 0x00 */
COMPATIBLE_IOCTL(FIBMAP)
COMPATIBLE_IOCTL(FIGETBSZ)
-/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
- * Some need translations, these do not.
- */
-COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
-COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
-COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
-ULONG_IOCTL(HDIO_SET_MULTCOUNT)
-ULONG_IOCTL(HDIO_SET_UNMASKINTR)
-ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
-ULONG_IOCTL(HDIO_SET_32BIT)
-ULONG_IOCTL(HDIO_SET_NOWERR)
-ULONG_IOCTL(HDIO_SET_DMA)
-ULONG_IOCTL(HDIO_SET_PIO_MODE)
-ULONG_IOCTL(HDIO_SET_NICE)
-ULONG_IOCTL(HDIO_SET_WCACHE)
-ULONG_IOCTL(HDIO_SET_ACOUSTIC)
-ULONG_IOCTL(HDIO_SET_BUSSTATE)
-ULONG_IOCTL(HDIO_SET_ADDRESS)
-COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
-/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
-COMPATIBLE_IOCTL(0x330)
-/* 0x02 -- Floppy ioctls */
-COMPATIBLE_IOCTL(FDMSGON)
-COMPATIBLE_IOCTL(FDMSGOFF)
-COMPATIBLE_IOCTL(FDSETEMSGTRESH)
-COMPATIBLE_IOCTL(FDFLUSH)
-COMPATIBLE_IOCTL(FDWERRORCLR)
-COMPATIBLE_IOCTL(FDSETMAXERRS)
-COMPATIBLE_IOCTL(FDGETMAXERRS)
-COMPATIBLE_IOCTL(FDGETDRVTYP)
-COMPATIBLE_IOCTL(FDEJECT)
-COMPATIBLE_IOCTL(FDCLRPRM)
-COMPATIBLE_IOCTL(FDFMTBEG)
-COMPATIBLE_IOCTL(FDFMTEND)
-COMPATIBLE_IOCTL(FDRESET)
-COMPATIBLE_IOCTL(FDTWADDLE)
-COMPATIBLE_IOCTL(FDFMTTRK)
-COMPATIBLE_IOCTL(FDRAWCMD)
-/* 0x12 */
-#ifdef CONFIG_BLOCK
-COMPATIBLE_IOCTL(BLKRASET)
-COMPATIBLE_IOCTL(BLKROSET)
-COMPATIBLE_IOCTL(BLKROGET)
-COMPATIBLE_IOCTL(BLKRRPART)
-COMPATIBLE_IOCTL(BLKFLSBUF)
-COMPATIBLE_IOCTL(BLKSECTSET)
-COMPATIBLE_IOCTL(BLKSSZGET)
-COMPATIBLE_IOCTL(BLKTRACESTART)
-COMPATIBLE_IOCTL(BLKTRACESTOP)
-COMPATIBLE_IOCTL(BLKTRACESETUP)
-COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
-ULONG_IOCTL(BLKRASET)
-ULONG_IOCTL(BLKFRASET)
-#endif
/* RAID */
COMPATIBLE_IOCTL(RAID_VERSION)
COMPATIBLE_IOCTL(GET_ARRAY_INFO)
@@ -2789,50 +2211,6 @@ COMPATIBLE_IOCTL(PPGETMODE)
COMPATIBLE_IOCTL(PPGETPHASE)
COMPATIBLE_IOCTL(PPGETFLAGS)
COMPATIBLE_IOCTL(PPSETFLAGS)
-/* CDROM stuff */
-COMPATIBLE_IOCTL(CDROMPAUSE)
-COMPATIBLE_IOCTL(CDROMRESUME)
-COMPATIBLE_IOCTL(CDROMPLAYMSF)
-COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
-COMPATIBLE_IOCTL(CDROMREADTOCHDR)
-COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
-COMPATIBLE_IOCTL(CDROMSTOP)
-COMPATIBLE_IOCTL(CDROMSTART)
-COMPATIBLE_IOCTL(CDROMEJECT)
-COMPATIBLE_IOCTL(CDROMVOLCTRL)
-COMPATIBLE_IOCTL(CDROMSUBCHNL)
-ULONG_IOCTL(CDROMEJECT_SW)
-COMPATIBLE_IOCTL(CDROMMULTISESSION)
-COMPATIBLE_IOCTL(CDROM_GET_MCN)
-COMPATIBLE_IOCTL(CDROMRESET)
-COMPATIBLE_IOCTL(CDROMVOLREAD)
-COMPATIBLE_IOCTL(CDROMSEEK)
-COMPATIBLE_IOCTL(CDROMPLAYBLK)
-COMPATIBLE_IOCTL(CDROMCLOSETRAY)
-ULONG_IOCTL(CDROM_SET_OPTIONS)
-ULONG_IOCTL(CDROM_CLEAR_OPTIONS)
-ULONG_IOCTL(CDROM_SELECT_SPEED)
-ULONG_IOCTL(CDROM_SELECT_DISC)
-ULONG_IOCTL(CDROM_MEDIA_CHANGED)
-ULONG_IOCTL(CDROM_DRIVE_STATUS)
-COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
-COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
-ULONG_IOCTL(CDROM_LOCKDOOR)
-ULONG_IOCTL(CDROM_DEBUG)
-COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
-/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
- * not take a struct cdrom_read, instead they take a struct cdrom_msf
- * which is compatible.
- */
-COMPATIBLE_IOCTL(CDROMREADMODE2)
-COMPATIBLE_IOCTL(CDROMREADMODE1)
-COMPATIBLE_IOCTL(CDROMREADRAW)
-COMPATIBLE_IOCTL(CDROMREADCOOKED)
-COMPATIBLE_IOCTL(CDROMREADALL)
-/* DVD ioctls */
-COMPATIBLE_IOCTL(DVD_READ_STRUCT)
-COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
-COMPATIBLE_IOCTL(DVD_AUTH)
/* pktcdvd */
COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
/* Big A */
@@ -3157,12 +2535,9 @@ COMPATIBLE_IOCTL(SIOCSIWSENS)
COMPATIBLE_IOCTL(SIOCGIWSENS)
COMPATIBLE_IOCTL(SIOCSIWRANGE)
COMPATIBLE_IOCTL(SIOCSIWPRIV)
-COMPATIBLE_IOCTL(SIOCGIWPRIV)
COMPATIBLE_IOCTL(SIOCSIWSTATS)
-COMPATIBLE_IOCTL(SIOCGIWSTATS)
COMPATIBLE_IOCTL(SIOCSIWAP)
COMPATIBLE_IOCTL(SIOCGIWAP)
-COMPATIBLE_IOCTL(SIOCSIWSCAN)
COMPATIBLE_IOCTL(SIOCSIWRATE)
COMPATIBLE_IOCTL(SIOCGIWRATE)
COMPATIBLE_IOCTL(SIOCSIWRTS)
@@ -3175,6 +2550,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
COMPATIBLE_IOCTL(SIOCGIWRETRY)
COMPATIBLE_IOCTL(SIOCSIWPOWER)
COMPATIBLE_IOCTL(SIOCGIWPOWER)
+COMPATIBLE_IOCTL(SIOCSIWAUTH)
+COMPATIBLE_IOCTL(SIOCGIWAUTH)
/* hiddev */
COMPATIBLE_IOCTL(HIDIOCGVERSION)
COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
@@ -3319,33 +2696,6 @@ HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
#endif
#ifdef CONFIG_BLOCK
-HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
-HANDLE_IOCTL(BLKRAGET, w_long)
-HANDLE_IOCTL(BLKGETSIZE, w_long)
-HANDLE_IOCTL(0x1260, broken_blkgetsize)
-HANDLE_IOCTL(BLKFRAGET, w_long)
-HANDLE_IOCTL(BLKSECTGET, w_long)
-HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_WCACHE, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_ACOUSTIC, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_ADDRESS, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_BUSSTATE, hdio_ioctl_trans)
-HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
-HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
-HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
-HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans)
-HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans)
-HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans)
-HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
-HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
-HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
#endif
@@ -3356,8 +2706,6 @@ HANDLE_IOCTL(PPPIOCSACTIVE32, ppp_sock_fprog_ioctl_trans)
#ifdef CONFIG_BLOCK
HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
-HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans)
-HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
#endif
#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
@@ -3398,9 +2746,6 @@ HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
/* block stuff */
#ifdef CONFIG_BLOCK
-HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
-HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
-HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
/* Raw devices */
HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
@@ -3493,6 +2838,22 @@ IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
/* loop */
IGNORE_IOCTL(LOOP_CLR_FD)
+
+#ifdef CONFIG_SPARC
+/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
+IGNORE_IOCTL(FBIOGTYPE)
+IGNORE_IOCTL(FBIOSATTR)
+IGNORE_IOCTL(FBIOGATTR)
+IGNORE_IOCTL(FBIOSVIDEO)
+IGNORE_IOCTL(FBIOGVIDEO)
+IGNORE_IOCTL(FBIOSCURPOS)
+IGNORE_IOCTL(FBIOGCURPOS)
+IGNORE_IOCTL(FBIOGCURMAX)
+IGNORE_IOCTL(FBIOPUTCMAP32)
+IGNORE_IOCTL(FBIOGETCMAP32)
+IGNORE_IOCTL(FBIOSCURSOR32)
+IGNORE_IOCTL(FBIOGCURSOR32)
+#endif
};
#define IOCTL_HASHSIZE 256
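
The do_wireless_ioctl() change above now copies the iw_point results back to the 32-bit caller after sys_ioctl() returns. As a rough, self-contained userspace sketch of that translation pattern (widen the 32-bit pointer field on the way in, narrow it again on the way out); every struct and function name here is invented for illustration, the real code uses compat_ptr()/ptr_to_compat() on __user memory:

#include <stdint.h>
#include <stdio.h>

/* 32-bit layout as seen from the compat task (pointer stored as a u32). */
struct compat_point { uint32_t pointer; uint16_t length; uint16_t flags; };
/* Native layout expected by the 64-bit handler. */
struct native_point { void *pointer; uint16_t length; uint16_t flags; };

static void widen(const struct compat_point *c, struct native_point *n)
{
	n->pointer = (void *)(uintptr_t)c->pointer;	/* role of compat_ptr() */
	n->length = c->length;
	n->flags = c->flags;
}

static void narrow(const struct native_point *n, struct compat_point *c)
{
	c->pointer = (uint32_t)(uintptr_t)n->pointer;	/* role of ptr_to_compat() */
	c->length = n->length;
	c->flags = n->flags;
}

int main(void)
{
	/* 0x1000 stands in for a 32-bit user buffer address. */
	struct compat_point c = { 0x1000, 64, 0 };
	struct native_point n;

	widen(&c, &n);		/* before handing off to the native handler */
	n.length = 32;		/* pretend the handler shortened the buffer */
	narrow(&n, &c);		/* copy results back, as the new code does after sys_ioctl() */
	printf("length seen by 32-bit caller: %u\n", (unsigned)c.length);
	return 0;
}
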
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index b00d962..871b0cb6 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -136,7 +136,7 @@ static int __init configfs_init(void)
configfs_dir_cachep = kmem_cache_create("configfs_dir_cache",
sizeof(struct configfs_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!configfs_dir_cachep)
goto out;
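
The "NULL, NULL" to "NULL" churn in this and the following files reflects kmem_cache_create() losing its unused destructor argument, leaving only the constructor. A hedged, illustrative kernel-style sketch of the new call shape, with invented demo_* names and a ctor prototype modelled on the efs init_once() later in this diff:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_inode { int field; };

static struct kmem_cache *demo_cachep;

/* Constructor run once per object when a new slab page is populated. */
static void demo_init_once(void *foo, struct kmem_cache *cachep,
			   unsigned long flags)
{
	struct demo_inode *di = foo;

	di->field = 0;
}

static int demo_cache_init(void)
{
	/* The old call shape ended with "demo_init_once, NULL" (ctor, dtor). */
	demo_cachep = kmem_cache_create("demo_inode_cache",
					sizeof(struct demo_inode), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					demo_init_once);
	return demo_cachep ? 0 : -ENOMEM;
}
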
diff --git a/fs/dcache.c b/fs/dcache.c
index cb9d050..678d39de 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2165,10 +2165,10 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dcache_init(mempages);
inode_init(mempages);
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 21af162..792cbf5 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -20,6 +20,7 @@
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/dcookies.h>
#include <linux/mutex.h>
@@ -205,7 +206,7 @@ static int dcookie_init(void)
dcookie_cache = kmem_cache_create("dcookie_cache",
sizeof(struct dcookie_struct),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!dcookie_cache)
goto out;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 2e124e0..a9b99c0 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -221,6 +221,42 @@ struct dentry *debugfs_create_u64(const char *name, mode_t mode,
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, "0x%04llx\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n");
+
+/**
+ * debugfs_create_x8 - create a debugfs file that is used to read and write an unsigned 8-bit value
+ * debugfs_create_x16 - create a debugfs file that is used to read and write an unsigned 16-bit value
+ * debugfs_create_x32 - create a debugfs file that is used to read and write an unsigned 32-bit value
+ *
+ * These functions are exactly the same as the above functions, but print the
+ * value in hexadecimal rather than decimal; for details, see the unsigned
+ * decimal functions above.
+ */
+struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+ struct dentry *parent, u8 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x8);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x8);
+
+struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+ struct dentry *parent, u16 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x16);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x16);
+
+struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+ struct dentry *parent, u32 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_x32);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_x32);
+
static ssize_t read_file_bool(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
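
A hypothetical usage sketch for the new hex helpers: a driver with its own debugfs directory could expose a register-style value through debugfs_create_x32(). The mydrv/my_reg names are invented and error handling is kept minimal; treat this as an assumption-laden example, not code from this series:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/stat.h>

static u32 my_reg;
static struct dentry *my_dir, *my_file;

static int __init my_debugfs_init(void)
{
	my_dir = debugfs_create_dir("mydrv", NULL);
	if (!my_dir)
		return -ENODEV;
	/* Reads and writes of "reg" are formatted as 0x%08llx. */
	my_file = debugfs_create_x32("reg", S_IRUGO | S_IWUSR, my_dir, &my_reg);
	if (!my_file) {
		debugfs_remove(my_dir);
		return -ENODEV;
	}
	return 0;
}

static void __exit my_debugfs_exit(void)
{
	debugfs_remove(my_file);
	debugfs_remove(my_dir);
}

module_init(my_debugfs_init);
module_exit(my_debugfs_exit);
MODULE_LICENSE("GPL");
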
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 1d533a2..11be8a3 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -345,11 +345,6 @@ void debugfs_remove(struct dentry *dentry)
switch (dentry->d_inode->i_mode & S_IFMT) {
case S_IFDIR:
ret = simple_rmdir(parent->d_inode, dentry);
- if (ret)
- printk(KERN_ERR
- "DebugFS rmdir on %s failed : "
- "directory not empty.\n",
- dentry->d_name.name);
break;
case S_IFLNK:
kfree(dentry->d_inode->i_private);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 52bb263..b5928a7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -264,15 +264,12 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
* Asynchronous IO callback.
*/
-static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_aio(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long remaining;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
/* cleanup the bio */
dio_bio_complete(dio, bio);
@@ -287,8 +284,6 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
aio_complete(dio->iocb, ret, 0);
kfree(dio);
}
-
- return 0;
}
/*
@@ -298,21 +293,17 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
* During I/O bi_private points at the dio. After I/O, bi_private is used to
* implement a singly-linked list of completed BIOs, at dio->bio_list.
*/
-static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+static void dio_bio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
spin_lock_irqsave(&dio->bio_lock, flags);
bio->bi_private = dio->bio_list;
dio->bio_list = bio;
if (--dio->refcount == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
- return 0;
}
static int
@@ -958,35 +949,22 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
ssize_t ret2;
size_t bytes;
- dio->bio = NULL;
dio->inode = inode;
dio->rw = rw;
dio->blkbits = blkbits;
dio->blkfactor = inode->i_blkbits - blkbits;
- dio->start_zero_done = 0;
- dio->size = 0;
dio->block_in_file = offset >> blkbits;
- dio->blocks_available = 0;
- dio->cur_page = NULL;
- dio->boundary = 0;
- dio->reap_counter = 0;
dio->get_block = get_block;
dio->end_io = end_io;
- dio->map_bh.b_private = NULL;
dio->final_block_in_bio = -1;
dio->next_block_for_io = -1;
- dio->page_errors = 0;
- dio->io_error = 0;
- dio->result = 0;
dio->iocb = iocb;
dio->i_size = i_size_read(inode);
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->bio_list = NULL;
- dio->waiter = NULL;
/*
* In case of non-aligned buffers, we may need 2 more
@@ -994,8 +972,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
*/
if (unlikely(dio->blkfactor))
dio->pages_in_io = 2;
- else
- dio->pages_in_io = 0;
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
@@ -1183,7 +1159,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
}
- dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+ dio = kzalloc(sizeof(*dio), GFP_KERNEL);
retval = -ENOMEM;
if (!dio)
goto out;
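
The direct-io change swaps kmalloc() plus a long run of zero assignments for a single kzalloc(). A minimal userspace analogy of that trade, using calloc() in place of kzalloc():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct demo {
	int refcount;
	void *bio_list;
	long result;
};

/* Old shape: allocate, then clear every field by hand (or memset the lot). */
static struct demo *alloc_then_clear(void)
{
	struct demo *d = malloc(sizeof(*d));
	if (d)
		memset(d, 0, sizeof(*d));
	return d;
}

/* New shape: ask for zeroed memory and drop the per-field initialisation. */
static struct demo *alloc_zeroed(void)
{
	return calloc(1, sizeof(struct demo));
}

int main(void)
{
	struct demo *a = alloc_then_clear();
	struct demo *b = alloc_zeroed();

	if (a && b)
		printf("both start with refcount %d/%d\n", a->refcount, b->refcount);
	free(a);
	free(b);
	return 0;
}
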
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 74901e9..d2fc238 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -491,6 +491,7 @@ struct dlm_ls {
uint64_t ls_recover_seq;
struct dlm_recover *ls_recover_args;
struct rw_semaphore ls_in_recovery; /* block local requests */
+ struct rw_semaphore ls_recv_active; /* block dlm_recv */
struct list_head ls_requestqueue;/* queue remote requests */
struct mutex ls_requestqueue_mutex;
char *ls_recover_buf;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index b455919..3915b8e1 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1670,9 +1670,10 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
with a deadlk here, we'd have to generate something like grant_lock with
the deadlk error.) */
-/* returns the highest requested mode of all blocked conversions */
+/* Returns the highest requested mode of all blocked conversions; sets
+ cw if there's a blocked conversion to DLM_LOCK_CW. */
-static int grant_pending_convert(struct dlm_rsb *r, int high)
+static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
{
struct dlm_lkb *lkb, *s;
int hi, demoted, quit, grant_restart, demote_restart;
@@ -1709,6 +1710,9 @@ static int grant_pending_convert(struct dlm_rsb *r, int high)
}
hi = max_t(int, lkb->lkb_rqmode, hi);
+
+ if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
+ *cw = 1;
}
if (grant_restart)
@@ -1721,29 +1725,52 @@ static int grant_pending_convert(struct dlm_rsb *r, int high)
return max_t(int, high, hi);
}
-static int grant_pending_wait(struct dlm_rsb *r, int high)
+static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
{
struct dlm_lkb *lkb, *s;
list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
if (can_be_granted(r, lkb, 0, NULL))
grant_lock_pending(r, lkb);
- else
+ else {
high = max_t(int, lkb->lkb_rqmode, high);
+ if (lkb->lkb_rqmode == DLM_LOCK_CW)
+ *cw = 1;
+ }
}
return high;
}
+/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
+ on either the convert or waiting queue.
+ high is the largest rqmode of all locks blocked on the convert or
+ waiting queue. */
+
+static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
+{
+ if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
+ if (gr->lkb_highbast < DLM_LOCK_EX)
+ return 1;
+ return 0;
+ }
+
+ if (gr->lkb_highbast < high &&
+ !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
+ return 1;
+ return 0;
+}
+
static void grant_pending_locks(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *s;
int high = DLM_LOCK_IV;
+ int cw = 0;
DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
- high = grant_pending_convert(r, high);
- high = grant_pending_wait(r, high);
+ high = grant_pending_convert(r, high, &cw);
+ high = grant_pending_wait(r, high, &cw);
if (high == DLM_LOCK_IV)
return;
@@ -1751,27 +1778,41 @@ static void grant_pending_locks(struct dlm_rsb *r)
/*
* If there are locks left on the wait/convert queue then send blocking
* ASTs to granted locks based on the largest requested mode (high)
- * found above. FIXME: highbast < high comparison not valid for PR/CW.
+ * found above.
*/
list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
- if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
- !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
- queue_bast(r, lkb, high);
+ if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) {
+ if (cw && high == DLM_LOCK_PR)
+ queue_bast(r, lkb, DLM_LOCK_CW);
+ else
+ queue_bast(r, lkb, high);
lkb->lkb_highbast = high;
}
}
}
+static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
+{
+ if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
+ (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
+ if (gr->lkb_highbast < DLM_LOCK_EX)
+ return 1;
+ return 0;
+ }
+
+ if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
+ return 1;
+ return 0;
+}
+
static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
struct dlm_lkb *lkb)
{
struct dlm_lkb *gr;
list_for_each_entry(gr, head, lkb_statequeue) {
- if (gr->lkb_bastaddr &&
- gr->lkb_highbast < lkb->lkb_rqmode &&
- !modes_compat(gr, lkb)) {
+ if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) {
queue_bast(r, gr, lkb->lkb_rqmode);
gr->lkb_highbast = lkb->lkb_rqmode;
}
@@ -2235,7 +2276,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
before we try again to grant this one. */
if (is_demoted(lkb)) {
- grant_pending_convert(r, DLM_LOCK_IV);
+ grant_pending_convert(r, DLM_LOCK_IV, NULL);
if (_can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
@@ -3597,55 +3638,8 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
dlm_put_lkb(lkb);
}
-int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
+static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
{
- struct dlm_message *ms = (struct dlm_message *) hd;
- struct dlm_ls *ls;
- int error = 0;
-
- if (!recovery)
- dlm_message_in(ms);
-
- ls = dlm_find_lockspace_global(hd->h_lockspace);
- if (!ls) {
- log_print("drop message %d from %d for unknown lockspace %d",
- ms->m_type, nodeid, hd->h_lockspace);
- return -EINVAL;
- }
-
- /* recovery may have just ended leaving a bunch of backed-up requests
- in the requestqueue; wait while dlm_recoverd clears them */
-
- if (!recovery)
- dlm_wait_requestqueue(ls);
-
- /* recovery may have just started while there were a bunch of
- in-flight requests -- save them in requestqueue to be processed
- after recovery. we can't let dlm_recvd block on the recovery
- lock. if dlm_recoverd is calling this function to clear the
- requestqueue, it needs to be interrupted (-EINTR) if another
- recovery operation is starting. */
-
- while (1) {
- if (dlm_locking_stopped(ls)) {
- if (recovery) {
- error = -EINTR;
- goto out;
- }
- error = dlm_add_requestqueue(ls, nodeid, hd);
- if (error == -EAGAIN)
- continue;
- else {
- error = -EINTR;
- goto out;
- }
- }
-
- if (dlm_lock_recovery_try(ls))
- break;
- schedule();
- }
-
switch (ms->m_type) {
/* messages sent to a master node */
@@ -3720,17 +3714,90 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
log_error(ls, "unknown message type %d", ms->m_type);
}
- dlm_unlock_recovery(ls);
- out:
- dlm_put_lockspace(ls);
dlm_astd_wake();
- return error;
}
+/* If the lockspace is in recovery mode (locking stopped), then normal
+ messages are saved on the requestqueue for processing after recovery is
+ done. When not in recovery mode, we wait for dlm_recoverd to drain saved
+ messages off the requestqueue before we process new ones. This occurs right
+ after recovery completes when we transition from saving all messages on
+ requestqueue, to processing all the saved messages, to processing new
+ messages as they arrive. */
-/*
- * Recovery related
- */
+static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+ int nodeid)
+{
+ if (dlm_locking_stopped(ls)) {
+ dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms);
+ } else {
+ dlm_wait_requestqueue(ls);
+ _receive_message(ls, ms);
+ }
+}
+
+/* This is called by dlm_recoverd to process messages that were saved on
+ the requestqueue. */
+
+void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
+{
+ _receive_message(ls, ms);
+}
+
+/* This is called by the midcomms layer when something is received for
+ the lockspace. It could be either a MSG (normal message sent as part of
+ standard locking activity) or an RCOM (recovery message sent as part of
+ lockspace recovery). */
+
+void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
+{
+ struct dlm_message *ms = (struct dlm_message *) hd;
+ struct dlm_rcom *rc = (struct dlm_rcom *) hd;
+ struct dlm_ls *ls;
+ int type = 0;
+
+ switch (hd->h_cmd) {
+ case DLM_MSG:
+ dlm_message_in(ms);
+ type = ms->m_type;
+ break;
+ case DLM_RCOM:
+ dlm_rcom_in(rc);
+ type = rc->rc_type;
+ break;
+ default:
+ log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
+ return;
+ }
+
+ if (hd->h_nodeid != nodeid) {
+ log_print("invalid h_nodeid %d from %d lockspace %x",
+ hd->h_nodeid, nodeid, hd->h_lockspace);
+ return;
+ }
+
+ ls = dlm_find_lockspace_global(hd->h_lockspace);
+ if (!ls) {
+ log_print("invalid h_lockspace %x from %d cmd %d type %d",
+ hd->h_lockspace, nodeid, hd->h_cmd, type);
+
+ if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
+ dlm_send_ls_not_ready(nodeid, rc);
+ return;
+ }
+
+ /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
+ be inactive (in this ls) before transitioning to recovery mode */
+
+ down_read(&ls->ls_recv_active);
+ if (hd->h_cmd == DLM_MSG)
+ dlm_receive_message(ls, ms, nodeid);
+ else
+ dlm_receive_rcom(ls, rc, nodeid);
+ up_read(&ls->ls_recv_active);
+
+ dlm_put_lockspace(ls);
+}
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
@@ -4388,7 +4455,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (lvb_in && ua->lksb.sb_lvbptr)
memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
- ua->castparam = ua_tmp->castparam;
+ if (ua_tmp->castparam)
+ ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
@@ -4433,7 +4501,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
goto out;
ua = (struct dlm_user_args *)lkb->lkb_astparam;
- ua->castparam = ua_tmp->castparam;
+ if (ua_tmp->castparam)
+ ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
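
dlm_receive_buffer() above first classifies the header, then rejects a mismatched h_nodeid, and only then looks up the lockspace. A small userspace sketch of that validate-then-dispatch order, with simplified stand-in types rather than the real dlm_header:

#include <stdio.h>

enum { DEMO_MSG = 1, DEMO_RCOM = 2 };

struct demo_header {
	int h_cmd;
	int h_nodeid;
};

static int receive_buffer(const struct demo_header *hd, int nodeid)
{
	/* Classify first: anything else is logged and dropped. */
	switch (hd->h_cmd) {
	case DEMO_MSG:
	case DEMO_RCOM:
		break;
	default:
		fprintf(stderr, "invalid h_cmd %d from %d\n", hd->h_cmd, nodeid);
		return -1;
	}

	/* Then make sure the header agrees with the sending node. */
	if (hd->h_nodeid != nodeid) {
		fprintf(stderr, "invalid h_nodeid %d from %d\n", hd->h_nodeid, nodeid);
		return -1;
	}

	printf("dispatching %s from node %d\n",
	       hd->h_cmd == DEMO_MSG ? "message" : "recovery message", nodeid);
	return 0;
}

int main(void)
{
	struct demo_header good = { DEMO_MSG, 4 };
	struct demo_header bad = { DEMO_RCOM, 7 };

	receive_buffer(&good, 4);	/* accepted */
	receive_buffer(&bad, 5);	/* rejected: node id mismatch */
	return 0;
}
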
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 1720313..ada0468 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -16,7 +16,8 @@
void dlm_print_rsb(struct dlm_rsb *r);
void dlm_dump_rsb(struct dlm_rsb *r);
void dlm_print_lkb(struct dlm_lkb *lkb);
-int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
+void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
+void dlm_receive_buffer(struct dlm_header *hd, int nodeid);
int dlm_modes_compat(int mode1, int mode2);
int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
unsigned int flags, struct dlm_rsb **r_ret);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 1dc7210..6353a83 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -167,7 +167,6 @@ static struct kobj_type dlm_ktype = {
};
static struct kset dlm_kset = {
- .kobj = {.name = "dlm",},
.ktype = &dlm_ktype,
};
@@ -228,6 +227,7 @@ int dlm_lockspace_init(void)
INIT_LIST_HEAD(&lslist);
spin_lock_init(&lslist_lock);
+ kobject_set_name(&dlm_kset.kobj, "dlm");
kobj_set_kset_s(&dlm_kset, kernel_subsys);
error = kset_register(&dlm_kset);
if (error)
@@ -519,6 +519,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
ls->ls_recover_seq = 0;
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
+ init_rwsem(&ls->ls_recv_active);
INIT_LIST_HEAD(&ls->ls_requestqueue);
mutex_init(&ls->ls_requestqueue_mutex);
mutex_init(&ls->ls_clear_proc_locks);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 0553a61..58bf3f5 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -313,6 +313,7 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
in6_addr->sin6_port = cpu_to_be16(port);
*addr_len = sizeof(struct sockaddr_in6);
}
+ memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
/* Close a remote connection and tidy up */
@@ -332,6 +333,7 @@ static void close_connection(struct connection *con, bool and_other)
__free_page(con->rx_page);
con->rx_page = NULL;
}
+
con->retries = 0;
mutex_unlock(&con->sock_mutex);
}
@@ -631,7 +633,7 @@ out_resched:
out_close:
mutex_unlock(&con->sock_mutex);
- if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
+ if (ret != -EAGAIN) {
close_connection(con, false);
/* Reconnect when there is something to send */
}
@@ -719,6 +721,8 @@ static int tcp_accept_from_sock(struct connection *con)
INIT_WORK(&othercon->swork, process_send_sockets);
INIT_WORK(&othercon->rwork, process_recv_sockets);
set_bit(CF_IS_OTHERCON, &othercon->flags);
+ }
+ if (!othercon->sock) {
newcon->othercon = othercon;
othercon->sock = newsock;
newsock->sk->sk_user_data = othercon;
@@ -1122,8 +1126,6 @@ static int tcp_listen_for_all(void)
log_print("Using TCP for communications");
- set_bit(CF_IS_OTHERCON, &con->flags);
-
sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
if (sock) {
add_sock(sock, con);
@@ -1262,14 +1264,15 @@ static void send_to_sock(struct connection *con)
if (len) {
ret = sendpage(con->sock, e->page, offset, len,
msg_flags);
- if (ret == -EAGAIN || ret == 0)
+ if (ret == -EAGAIN || ret == 0) {
+ cond_resched();
goto out;
+ }
if (ret <= 0)
goto send_error;
- } else {
+ }
/* Don't starve people filling buffers */
cond_resched();
- }
spin_lock(&con->writequeue_lock);
e->offset += ret;
@@ -1407,7 +1410,7 @@ void dlm_lowcomms_stop(void)
for (i = 0; i <= max_nodeid; i++) {
con = __nodeid2con(i, 0);
if (con) {
- con->flags |= 0xFF;
+ con->flags |= 0x0F;
if (con->sock)
con->sock->sk->sk_user_data = NULL;
}
@@ -1423,8 +1426,6 @@ void dlm_lowcomms_stop(void)
con = __nodeid2con(i, 0);
if (con) {
close_connection(con, true);
- if (con->othercon)
- kmem_cache_free(con_cache, con->othercon);
kmem_cache_free(con_cache, con);
}
}
@@ -1449,7 +1450,7 @@ int dlm_lowcomms_start(void)
error = -ENOMEM;
con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
__alignof__(struct connection), 0,
- NULL, NULL);
+ NULL);
if (!con_cache)
goto out;
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 073599d..e9cdcab 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -18,10 +18,6 @@
#include "rcom.h"
#include "config.h"
-/*
- * Following called by dlm_recoverd thread
- */
-
static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
struct dlm_member *memb = NULL;
@@ -56,8 +52,10 @@ static int dlm_add_member(struct dlm_ls *ls, int nodeid)
return -ENOMEM;
w = dlm_node_weight(ls->ls_name, nodeid);
- if (w < 0)
+ if (w < 0) {
+ kfree(memb);
return w;
+ }
memb->nodeid = nodeid;
memb->weight = w;
@@ -248,18 +246,30 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
return error;
}
-/*
- * Following called from lockspace.c
- */
+/* Userspace guarantees that dlm_ls_stop() has completed on all nodes before
+ dlm_ls_start() is called on any of them to start the new recovery. */
int dlm_ls_stop(struct dlm_ls *ls)
{
int new;
/*
- * A stop cancels any recovery that's in progress (see RECOVERY_STOP,
- * dlm_recovery_stopped()) and prevents any new locks from being
- * processed (see RUNNING, dlm_locking_stopped()).
+ * Prevent dlm_recv from being in the middle of something when we do
+ * the stop. This includes ensuring dlm_recv isn't processing a
+ * recovery message (rcom), while dlm_recoverd is aborting and
+ * resetting things from an in-progress recovery. i.e. we want
+ * dlm_recoverd to abort its recovery without worrying about dlm_recv
+ * processing an rcom at the same time. Stopping dlm_recv also makes
+ * it easy for dlm_receive_message() to check locking stopped and add a
+ * message to the requestqueue without races.
+ */
+
+ down_write(&ls->ls_recv_active);
+
+ /*
+ * Abort any recovery that's in progress (see RECOVERY_STOP,
+ * dlm_recovery_stopped()) and tell any other threads running in the
+ * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
*/
spin_lock(&ls->ls_recover_lock);
@@ -269,8 +279,14 @@ int dlm_ls_stop(struct dlm_ls *ls)
spin_unlock(&ls->ls_recover_lock);
/*
+ * Let dlm_recv run again, now any normal messages will be saved on the
+ * requestqueue for later.
+ */
+
+ up_write(&ls->ls_recv_active);
+
+ /*
* This in_recovery lock does two things:
- *
* 1) Keeps this function from returning until all threads are out
* of locking routines and locking is truly stopped.
* 2) Keeps any new requests from being processed until it's unlocked
@@ -282,9 +298,8 @@ int dlm_ls_stop(struct dlm_ls *ls)
/*
* The recoverd suspend/resume makes sure that dlm_recoverd (if
- * running) has noticed the clearing of RUNNING above and quit
- * processing the previous recovery. This will be true for all nodes
- * before any nodes start the new recovery.
+ * running) has noticed RECOVERY_STOP above and quit processing the
+ * previous recovery.
*/
dlm_recoverd_suspend(ls);
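
dlm_ls_stop() now takes ls_recv_active for write so that no dlm_recv thread is mid-message when locking is flagged as stopped. A userspace analogy of that reader/writer arrangement using a pthread rwlock; it mirrors the idea only, not the kernel rwsem API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t recv_active = PTHREAD_RWLOCK_INITIALIZER;
static int locking_stopped;

static void receive_one_message(int msg)
{
	pthread_rwlock_rdlock(&recv_active);	/* like down_read(&ls->ls_recv_active) */
	if (locking_stopped)
		printf("msg %d saved for after recovery\n", msg);
	else
		printf("msg %d processed normally\n", msg);
	pthread_rwlock_unlock(&recv_active);
}

static void stop_locking(void)
{
	pthread_rwlock_wrlock(&recv_active);	/* like down_write(): waits out in-flight receivers */
	locking_stopped = 1;			/* flipped while no receiver is mid-message */
	pthread_rwlock_unlock(&recv_active);
}

int main(void)
{
	receive_one_message(1);
	stop_locking();
	receive_one_message(2);
	return 0;
}
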
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index f858fef..ecf0e5c 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -23,7 +23,7 @@ int dlm_memory_init(void)
int ret = 0;
lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
- __alignof__(struct dlm_lkb), 0, NULL, NULL);
+ __alignof__(struct dlm_lkb), 0, NULL);
if (!lkb_cache)
ret = -ENOMEM;
return ret;
@@ -39,9 +39,7 @@ char *allocate_lvb(struct dlm_ls *ls)
{
char *p;
- p = kmalloc(ls->ls_lvblen, GFP_KERNEL);
- if (p)
- memset(p, 0, ls->ls_lvblen);
+ p = kzalloc(ls->ls_lvblen, GFP_KERNEL);
return p;
}
@@ -59,9 +57,7 @@ struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
- r = kmalloc(sizeof(*r) + namelen, GFP_KERNEL);
- if (r)
- memset(r, 0, sizeof(*r) + namelen);
+ r = kzalloc(sizeof(*r) + namelen, GFP_KERNEL);
return r;
}
@@ -101,9 +97,7 @@ struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen)
DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,
printk("namelen = %d\n", namelen););
- de = kmalloc(sizeof(*de) + namelen, GFP_KERNEL);
- if (de)
- memset(de, 0, sizeof(*de) + namelen);
+ de = kzalloc(sizeof(*de) + namelen, GFP_KERNEL);
return de;
}
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index a5126e0..f8c69dd 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -27,7 +27,6 @@
#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
-#include "rcom.h"
#include "lock.h"
#include "midcomms.h"
@@ -117,19 +116,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
offset &= (limit - 1);
len -= msglen;
- switch (msg->h_cmd) {
- case DLM_MSG:
- dlm_receive_message(msg, nodeid, 0);
- break;
-
- case DLM_RCOM:
- dlm_receive_rcom(msg, nodeid);
- break;
-
- default:
- log_print("unknown msg type %x from %u: %u %u %u %u",
- msg->h_cmd, nodeid, msglen, len, offset, ret);
- }
+ dlm_receive_buffer(msg, nodeid);
}
if (msg != (struct dlm_header *) __tmp)
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index e3a1527..ae2fd97f 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -386,8 +386,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
dlm_recover_process_copy(ls, rc_in);
}
-static int send_ls_not_ready(struct dlm_ls *ls, int nodeid,
- struct dlm_rcom *rc_in)
+/* If the lockspace doesn't exist then still send a status message
+ back; it's possible that it just doesn't have its global_id yet. */
+
+int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct rcom_config *rf;
@@ -395,7 +397,7 @@ static int send_ls_not_ready(struct dlm_ls *ls, int nodeid,
char *mb;
int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
- mh = dlm_lowcomms_get_buffer(nodeid, mb_len, ls->ls_allocation, &mb);
+ mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &mb);
if (!mh)
return -ENOBUFS;
memset(mb, 0, mb_len);
@@ -447,28 +449,11 @@ static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
return rv;
}
-/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
+/* Called by dlm_recv; corresponds to dlm_receive_message() but special
recovery-only comms are sent through here. */
-void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
+void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
- struct dlm_rcom *rc = (struct dlm_rcom *) hd;
- struct dlm_ls *ls;
-
- dlm_rcom_in(rc);
-
- /* If the lockspace doesn't exist then still send a status message
- back; it's possible that it just doesn't have its global_id yet. */
-
- ls = dlm_find_lockspace_global(hd->h_lockspace);
- if (!ls) {
- log_print("lockspace %x from %d type %x not found",
- hd->h_lockspace, nodeid, rc->rc_type);
- if (rc->rc_type == DLM_RCOM_STATUS)
- send_ls_not_ready(ls, nodeid, rc);
- return;
- }
-
if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
log_debug(ls, "ignoring recovery message %x from %d",
rc->rc_type, nodeid);
@@ -478,12 +463,6 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
if (is_old_reply(ls, rc))
goto out;
- if (nodeid != rc->rc_header.h_nodeid) {
- log_error(ls, "bad rcom nodeid %d from %d",
- rc->rc_header.h_nodeid, nodeid);
- goto out;
- }
-
switch (rc->rc_type) {
case DLM_RCOM_STATUS:
receive_rcom_status(ls, rc);
@@ -521,6 +500,6 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
}
out:
- dlm_put_lockspace(ls);
+ return;
}
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index d798432..b09abd2 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -18,7 +18,8 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
-void dlm_receive_rcom(struct dlm_header *hd, int nodeid);
+void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid);
+int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in);
#endif
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 6657599..4b89e20 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -24,19 +24,28 @@
/* If the start for which we're re-enabling locking (seq) has been superseded
- by a newer stop (ls_recover_seq), we need to leave locking disabled. */
+ by a newer stop (ls_recover_seq), we need to leave locking disabled.
+
+ We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
+ locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
+ enables locking and clears the requestqueue between a and b. */
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
+ down_write(&ls->ls_recv_active);
+
spin_lock(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
+ /* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
error = 0;
}
spin_unlock(&ls->ls_recover_lock);
+
+ up_write(&ls->ls_recv_active);
return error;
}
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 65008d7..0de04f1 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -20,7 +20,7 @@
struct rq_entry {
struct list_head list;
int nodeid;
- char request[1];
+ char request[0];
};
/*
@@ -30,42 +30,39 @@ struct rq_entry {
* lockspace is enabled on some while still suspended on others.
*/
-int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
{
struct rq_entry *e;
int length = hd->h_length;
- int rv = 0;
e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
if (!e) {
- log_print("dlm_add_requestqueue: out of memory\n");
- return 0;
+ log_print("dlm_add_requestqueue: out of memory len %d", length);
+ return;
}
e->nodeid = nodeid;
memcpy(e->request, hd, length);
- /* We need to check dlm_locking_stopped() after taking the mutex to
- avoid a race where dlm_recoverd enables locking and runs
- process_requestqueue between our earlier dlm_locking_stopped check
- and this addition to the requestqueue. */
-
mutex_lock(&ls->ls_requestqueue_mutex);
- if (dlm_locking_stopped(ls))
- list_add_tail(&e->list, &ls->ls_requestqueue);
- else {
- log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
- kfree(e);
- rv = -EAGAIN;
- }
+ list_add_tail(&e->list, &ls->ls_requestqueue);
mutex_unlock(&ls->ls_requestqueue_mutex);
- return rv;
}
+/*
+ * Called by dlm_recoverd to process normal messages saved while recovery was
+ * happening. Normal locking has been enabled before this is called. dlm_recv,
+ * upon receiving a message, will wait for all saved messages to be drained
+ * here before processing the message it got. If a new dlm_ls_stop() arrives
+ * while we're processing these saved messages, it may block trying to suspend
+ * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that
+ * case, we don't abort since locking_stopped is still 0. If dlm_recv is not
+ * waiting for us, then this processing may be aborted due to locking_stopped.
+ */
+
int dlm_process_requestqueue(struct dlm_ls *ls)
{
struct rq_entry *e;
- struct dlm_header *hd;
int error = 0;
mutex_lock(&ls->ls_requestqueue_mutex);
@@ -79,14 +76,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
mutex_unlock(&ls->ls_requestqueue_mutex);
- hd = (struct dlm_header *) e->request;
- error = dlm_receive_message(hd, e->nodeid, 1);
-
- if (error == -EINTR) {
- /* entry is left on requestqueue */
- log_debug(ls, "process_requestqueue abort eintr");
- break;
- }
+ dlm_receive_message_saved(ls, (struct dlm_message *)e->request);
mutex_lock(&ls->ls_requestqueue_mutex);
list_del(&e->list);
@@ -106,10 +96,12 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
/*
* After recovery is done, locking is resumed and dlm_recoverd takes all the
- * saved requests and processes them as they would have been by dlm_recvd. At
- * the same time, dlm_recvd will start receiving new requests from remote
- * nodes. We want to delay dlm_recvd processing new requests until
- * dlm_recoverd has finished processing the old saved requests.
+ * saved requests and processes them as they would have been by dlm_recv. At
+ * the same time, dlm_recv will start receiving new requests from remote nodes.
+ * We want to delay dlm_recv processing new requests until dlm_recoverd has
+ * finished processing the old saved requests. We don't check for locking
+ * stopped here because dlm_ls_stop won't stop locking until it's suspended us
+ * (dlm_recv).
*/
void dlm_wait_requestqueue(struct dlm_ls *ls)
@@ -118,8 +110,6 @@ void dlm_wait_requestqueue(struct dlm_ls *ls)
mutex_lock(&ls->ls_requestqueue_mutex);
if (list_empty(&ls->ls_requestqueue))
break;
- if (dlm_locking_stopped(ls))
- break;
mutex_unlock(&ls->ls_requestqueue_mutex);
schedule();
}
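
The rq_entry change from request[1] to request[0] keeps the header and the variable-length message payload in a single allocation without a dummy byte. A self-contained userspace sketch of the same flexible-array layout and the sizeof(*e) + length allocation it enables:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct rq_entry_demo {
	int nodeid;
	size_t length;
	char request[];		/* C99 flexible array member */
};

static struct rq_entry_demo *save_message(int nodeid, const void *msg,
					  size_t length)
{
	/* One allocation covers the header plus the copied message. */
	struct rq_entry_demo *e = malloc(sizeof(*e) + length);
	if (!e)
		return NULL;
	e->nodeid = nodeid;
	e->length = length;
	memcpy(e->request, msg, length);	/* mirrors memcpy(e->request, hd, length) */
	return e;
}

int main(void)
{
	const char payload[] = "lock request";
	struct rq_entry_demo *e = save_message(3, payload, sizeof(payload));

	if (e) {
		printf("saved %zu bytes from node %d: %s\n",
		       e->length, e->nodeid, e->request);
		free(e);
	}
	return 0;
}
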
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 6a53ea0..aba34fc 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,7 +13,7 @@
#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__
-int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
int dlm_process_requestqueue(struct dlm_ls *ls);
void dlm_wait_requestqueue(struct dlm_ls *ls);
void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/dnotify.c b/fs/dnotify.c
index 936409f..28d01ed 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(dnotify_parent);
static int __init dnotify_init(void)
{
dn_cache = kmem_cache_create("dnotify_cache",
- sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
return 0;
}
diff --git a/fs/dquot.c b/fs/dquot.c
index 7e27315..de9a29f6 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1848,11 +1848,11 @@ static int __init dquot_init(void)
register_sysctl_table(sys_table);
- dquot_cachep = kmem_cache_create("dquot",
+ dquot_cachep = kmem_cache_create("dquot",
sizeof(struct dquot), sizeof(unsigned long) * 4,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
- NULL, NULL);
+ NULL);
order = 0;
dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e77a2ec..131954b 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -353,6 +353,10 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
ecryptfs_printk(KERN_DEBUG, "Is a symlink; returning\n");
goto out;
}
+ if (special_file(lower_inode->i_mode)) {
+ ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
+ goto out;
+ }
if (!nd) {
ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
"as we *think* we are about to unlink\n");
@@ -902,8 +906,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
mutex_lock(&crypt_stat->cs_mutex);
if (S_ISDIR(dentry->d_inode->i_mode))
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
- else if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
+ else if (S_ISREG(dentry->d_inode->i_mode)
+ && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
+ || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
struct vfsmount *lower_mnt;
struct file *lower_file = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 02ca6f1..a984972 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -677,7 +677,7 @@ static int ecryptfs_init_kmem_caches(void)
info = &ecryptfs_cache_infos[i];
*(info->cache) = kmem_cache_create(info->name, info->size,
- 0, SLAB_HWCACHE_ALIGN, info->ctor, NULL);
+ 0, SLAB_HWCACHE_ALIGN, info->ctor);
if (!*(info->cache)) {
ecryptfs_free_kmem_caches();
ecryptfs_printk(KERN_WARNING, "%s: "
@@ -813,6 +813,15 @@ out:
return rc;
}
+static void do_sysfs_unregistration(void)
+{
+ sysfs_remove_file(&ecryptfs_subsys.kobj,
+ &sysfs_attr_version.attr);
+ sysfs_remove_file(&ecryptfs_subsys.kobj,
+ &sysfs_attr_version_str.attr);
+ subsystem_unregister(&ecryptfs_subsys);
+}
+
static int __init ecryptfs_init(void)
{
int rc;
@@ -851,6 +860,9 @@ static int __init ecryptfs_init(void)
if (rc) {
ecryptfs_printk(KERN_ERR, "Failure occured while attempting to "
"initialize the eCryptfs netlink socket\n");
+ do_sysfs_unregistration();
+ unregister_filesystem(&ecryptfs_fs_type);
+ ecryptfs_free_kmem_caches();
}
out:
return rc;
@@ -858,11 +870,7 @@ out:
static void __exit ecryptfs_exit(void)
{
- sysfs_remove_file(&ecryptfs_subsys.kobj,
- &sysfs_attr_version.attr);
- sysfs_remove_file(&ecryptfs_subsys.kobj,
- &sysfs_attr_version_str.attr);
- subsystem_unregister(&ecryptfs_subsys);
+ do_sysfs_unregistration();
ecryptfs_release_messaging(ecryptfs_transport);
unregister_filesystem(&ecryptfs_fs_type);
ecryptfs_free_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 7d5a43c..fd3f94d 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -409,8 +409,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
if (!PageUptodate(page))
rc = ecryptfs_do_readpage(file, page, page->index);
if (page->index != 0) {
- loff_t end_of_prev_pg_pos =
- (((loff_t)page->index << PAGE_CACHE_SHIFT) - 1);
+ loff_t end_of_prev_pg_pos = page_offset(page) - 1;
if (end_of_prev_pg_pos > i_size_read(page->mapping->host)) {
rc = ecryptfs_truncate(file->f_path.dentry,
@@ -736,7 +735,7 @@ static int ecryptfs_commit_write(struct file *file, struct page *page,
goto out;
}
inode->i_blocks = lower_inode->i_blocks;
- pos = (page->index << PAGE_CACHE_SHIFT) + to;
+ pos = page_offset(page) + to;
if (pos > i_size_read(inode)) {
i_size_write(inode, pos);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
@@ -835,7 +834,8 @@ static void ecryptfs_sync_page(struct page *page)
ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
return;
}
- lower_page->mapping->a_ops->sync_page(lower_page);
+ if (lower_page->mapping->a_ops->sync_page)
+ lower_page->mapping->a_ops->sync_page(lower_page);
ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
lower_page->index);
unlock_page(lower_page);
diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c
index fe91863..9aa3451 100644
--- a/fs/ecryptfs/netlink.c
+++ b/fs/ecryptfs/netlink.c
@@ -165,22 +165,10 @@ static int ecryptfs_process_nl_quit(struct sk_buff *skb)
* it to its desired netlink context element and wake up the process
* that is waiting for a response.
*/
-static void ecryptfs_receive_nl_message(struct sock *sk, int len)
+static void ecryptfs_receive_nl_message(struct sk_buff *skb)
{
- struct sk_buff *skb;
struct nlmsghdr *nlh;
- int rc = 0; /* skb_recv_datagram requires this */
-receive:
- skb = skb_recv_datagram(sk, 0, 0, &rc);
- if (rc == -EINTR)
- goto receive;
- else if (rc < 0) {
- ecryptfs_printk(KERN_ERR, "Error occurred while "
- "receiving eCryptfs netlink message; "
- "rc = [%d]\n", rc);
- return;
- }
nlh = nlmsg_hdr(skb);
if (!NLMSG_OK(nlh, skb->len)) {
ecryptfs_printk(KERN_ERR, "Received corrupt netlink "
@@ -227,7 +215,7 @@ int ecryptfs_init_netlink(void)
{
int rc;
- ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0,
+ ecryptfs_nl_sock = netlink_kernel_create(&init_net, NETLINK_ECRYPTFS, 0,
ecryptfs_receive_nl_message,
NULL, THIS_MODULE);
if (!ecryptfs_nl_sock) {
diff --git a/fs/efs/super.c b/fs/efs/super.c
index d360c81..ce4acb8 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -75,13 +75,13 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
efs_inode_cachep = kmem_cache_create("efs_inode_cache",
sizeof(struct efs_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (efs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0b73cd4..77b9953 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1324,12 +1324,12 @@ static int __init eventpoll_init(void)
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
- NULL, NULL);
+ NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0,
- EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
+ EPI_SLAB_DEBUG|SLAB_PANIC, NULL);
return 0;
}
diff --git a/fs/exec.c b/fs/exec.c
index f20561f..073b0b8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -50,10 +50,10 @@
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
-#include <linux/signalfd.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
+#include <asm/tlb.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
@@ -178,6 +178,207 @@ exit:
goto out;
}
+#ifdef CONFIG_MMU
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ int write)
+{
+ struct page *page;
+ int ret;
+
+#ifdef CONFIG_STACK_GROWSUP
+ if (write) {
+ ret = expand_stack_downwards(bprm->vma, pos);
+ if (ret < 0)
+ return NULL;
+ }
+#endif
+ ret = get_user_pages(current, bprm->mm, pos,
+ 1, write, 1, &page, NULL);
+ if (ret <= 0)
+ return NULL;
+
+ if (write) {
+ struct rlimit *rlim = current->signal->rlim;
+ unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+
+ /*
+ * Limit to 1/4-th the stack size for the argv+env strings.
+ * This ensures that:
+ * - the remaining binfmt code will not run out of stack space,
+ * - the program will have a reasonable amount of stack left
+ * to work from.
+ */
+ if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
+ put_page(page);
+ return NULL;
+ }
+ }
+
+ return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+ put_page(page);
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ struct page *page)
+{
+ flush_cache_page(bprm->vma, pos, page_to_pfn(page));
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+ int err = -ENOMEM;
+ struct vm_area_struct *vma = NULL;
+ struct mm_struct *mm = bprm->mm;
+
+ bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma)
+ goto err;
+
+ down_write(&mm->mmap_sem);
+ vma->vm_mm = mm;
+
+ /*
+ * Place the stack at the largest stack address the architecture
+ * supports. Later, we'll move this to an appropriate place. We don't
+ * use STACK_TOP because that can depend on attributes which aren't
+ * configured yet.
+ */
+ vma->vm_end = STACK_TOP_MAX;
+ vma->vm_start = vma->vm_end - PAGE_SIZE;
+
+ vma->vm_flags = VM_STACK_FLAGS;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+ err = insert_vm_struct(mm, vma);
+ if (err) {
+ up_write(&mm->mmap_sem);
+ goto err;
+ }
+
+ mm->stack_vm = mm->total_vm = 1;
+ up_write(&mm->mmap_sem);
+
+ bprm->p = vma->vm_end - sizeof(void *);
+
+ return 0;
+
+err:
+ if (vma) {
+ bprm->vma = NULL;
+ kmem_cache_free(vm_area_cachep, vma);
+ }
+
+ return err;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+ return len <= MAX_ARG_STRLEN;
+}
+
+#else
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ int write)
+{
+ struct page *page;
+
+ page = bprm->page[pos / PAGE_SIZE];
+ if (!page && write) {
+ page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
+ if (!page)
+ return NULL;
+ bprm->page[pos / PAGE_SIZE] = page;
+ }
+
+ return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+ if (bprm->page[i]) {
+ __free_page(bprm->page[i]);
+ bprm->page[i] = NULL;
+ }
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+ int i;
+
+ for (i = 0; i < MAX_ARG_PAGES; i++)
+ free_arg_page(bprm, i);
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ struct page *page)
+{
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+ bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
+ return 0;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+ return len <= bprm->p;
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Create a new mm_struct and populate it with a temporary stack
+ * vm_area_struct. We don't have enough context at this point to set the stack
+ * flags, permissions, and offset, so we use temporary values. We'll update
+ * them later in setup_arg_pages().
+ */
+int bprm_mm_init(struct linux_binprm *bprm)
+{
+ int err;
+ struct mm_struct *mm = NULL;
+
+ bprm->mm = mm = mm_alloc();
+ err = -ENOMEM;
+ if (!mm)
+ goto err;
+
+ err = init_new_context(current, mm);
+ if (err)
+ goto err;
+
+ err = __bprm_mm_init(bprm);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ if (mm) {
+ bprm->mm = NULL;
+ mmdrop(mm);
+ }
+
+ return err;
+}
+
/*
* count() counts the number of strings in array ARGV.
*/
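The MMU branch added above drops the old MAX_ARG_PAGES ceiling on exec arguments: each individual string is now bounded by MAX_ARG_STRLEN (enforced through strnlen_user() and valid_arg_len()), and get_arg_page() refuses to let the temporary stack VMA grow past a quarter of the RLIMIT_STACK soft limit. A minimal userspace sketch of the same arithmetic, assuming the conventional MAX_ARG_STRLEN of 32 pages and 4 KiB pages; the helper names are illustrative, not kernel API.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE      4096UL
#define MAX_ARG_STRLEN (32 * PAGE_SIZE)	/* assumed definition: 32 pages per string */

/* mirrors valid_arg_len(): one string may not exceed MAX_ARG_STRLEN */
static bool arg_string_ok(unsigned long len)
{
	return len <= MAX_ARG_STRLEN;
}

/* mirrors the check in get_arg_page(): argv+env together may use at
 * most a quarter of the stack rlimit */
static bool arg_total_ok(unsigned long used, unsigned long rlim_stack_cur)
{
	return used <= rlim_stack_cur / 4;
}

int main(void)
{
	unsigned long rlim = 8UL << 20;		/* 8 MiB RLIMIT_STACK soft limit */

	printf("per-string cap : %lu KiB\n", MAX_ARG_STRLEN >> 10);
	printf("argv+env budget: %lu KiB\n", (rlim / 4) >> 10);
	printf("3 MiB of args  : %s\n",
	       arg_total_ok(3UL << 20, rlim) ? "ok" : "-E2BIG");
	printf("200 KiB string : %s\n",
	       arg_string_ok(200UL << 10) ? "ok" : "-E2BIG");
	return 0;
}

With an 8 MiB stack rlimit this yields a 2 MiB budget for argv plus environment, versus the old fixed MAX_ARG_PAGES limit of 128 KiB with 4 KiB pages.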
@@ -203,15 +404,16 @@ static int count(char __user * __user * argv, int max)
}
/*
- * 'copy_strings()' copies argument/environment strings from user
- * memory to free pages in kernel mem. These are in a format ready
- * to be put directly into the top of new user memory.
+ * 'copy_strings()' copies argument/environment strings from the old
+ * process's memory to the new process's stack. The call to get_user_pages()
+ * ensures the destination page is created and not swapped out.
*/
static int copy_strings(int argc, char __user * __user * argv,
struct linux_binprm *bprm)
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
+ unsigned long kpos = 0;
int ret;
while (argc-- > 0) {
@@ -220,69 +422,69 @@ static int copy_strings(int argc, char __user * __user * argv,
unsigned long pos;
if (get_user(str, argv+argc) ||
- !(len = strnlen_user(str, bprm->p))) {
+ !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
ret = -EFAULT;
goto out;
}
- if (bprm->p < len) {
+ if (!valid_arg_len(bprm, len)) {
ret = -E2BIG;
goto out;
}
- bprm->p -= len;
- /* XXX: add architecture specific overflow check here. */
+ /* We're going to work our way backwards. */
pos = bprm->p;
+ str += len;
+ bprm->p -= len;
while (len > 0) {
- int i, new, err;
int offset, bytes_to_copy;
- struct page *page;
offset = pos % PAGE_SIZE;
- i = pos/PAGE_SIZE;
- page = bprm->page[i];
- new = 0;
- if (!page) {
- page = alloc_page(GFP_HIGHUSER);
- bprm->page[i] = page;
+ if (offset == 0)
+ offset = PAGE_SIZE;
+
+ bytes_to_copy = offset;
+ if (bytes_to_copy > len)
+ bytes_to_copy = len;
+
+ offset -= bytes_to_copy;
+ pos -= bytes_to_copy;
+ str -= bytes_to_copy;
+ len -= bytes_to_copy;
+
+ if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+ struct page *page;
+
+ page = get_arg_page(bprm, pos, 1);
if (!page) {
- ret = -ENOMEM;
+ ret = -E2BIG;
goto out;
}
- new = 1;
- }
- if (page != kmapped_page) {
- if (kmapped_page)
+ if (kmapped_page) {
+ flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
+ put_arg_page(kmapped_page);
+ }
kmapped_page = page;
kaddr = kmap(kmapped_page);
+ kpos = pos & PAGE_MASK;
+ flush_arg_page(bprm, kpos, kmapped_page);
}
- if (new && offset)
- memset(kaddr, 0, offset);
- bytes_to_copy = PAGE_SIZE - offset;
- if (bytes_to_copy > len) {
- bytes_to_copy = len;
- if (new)
- memset(kaddr+offset+len, 0,
- PAGE_SIZE-offset-len);
- }
- err = copy_from_user(kaddr+offset, str, bytes_to_copy);
- if (err) {
+ if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
ret = -EFAULT;
goto out;
}
-
- pos += bytes_to_copy;
- str += bytes_to_copy;
- len -= bytes_to_copy;
}
}
ret = 0;
out:
- if (kmapped_page)
+ if (kmapped_page) {
+ flush_kernel_dcache_page(kmapped_page);
kunmap(kmapped_page);
+ put_arg_page(kmapped_page);
+ }
return ret;
}
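copy_strings() above now packs each string from its last byte toward its first, decrementing bprm->p as it goes and copying at most one page worth per iteration. The chunking arithmetic is easy to misread, so here is a self-contained sketch that reproduces it against a flat buffer instead of kmapped argument pages; everything outside the loop body is illustrative scaffolding.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/*
 * Copy 'len' bytes of 'src' so the copy ends at offset 'pos' in 'buf',
 * working backwards in at most page-sized chunks, the same way the
 * rewritten loop in copy_strings() does (minus get_arg_page()/kmap()).
 */
static void copy_backwards(char *buf, unsigned long pos,
			   const char *src, unsigned long len)
{
	src += len;				/* like 'str += len' */
	while (len > 0) {
		unsigned long offset = pos % PAGE_SIZE;
		unsigned long bytes;

		if (offset == 0)		/* pos sits on a page boundary */
			offset = PAGE_SIZE;
		bytes = offset < len ? offset : len;

		offset -= bytes;
		pos -= bytes;
		src -= bytes;
		len -= bytes;

		/* page base + offset is exactly the decremented pos */
		memcpy(buf + (pos & ~(PAGE_SIZE - 1)) + offset, src, bytes);
	}
}

int main(void)
{
	static char stack[4 * PAGE_SIZE];
	const char *arg = "/usr/bin/example";
	unsigned long len = strlen(arg) + 1;
	unsigned long p = sizeof(stack) - sizeof(void *);	/* like bprm->p */

	copy_backwards(stack, p, arg, len);
	p -= len;						/* bprm->p -= len */
	printf("stored at offset %lu: %s\n", p, stack + p);
	return 0;
}

Each chunk's destination works out to the freshly decremented pos, which is why the kernel can address it as the kmapped page base plus offset.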
@@ -298,181 +500,172 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
set_fs(oldfs);
return r;
}
-
EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU
+
/*
- * This routine is used to map in a page into an address space: needed by
- * execve() for the initial stack and environment pages.
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location. The process proceeds as follows:
*
- * vma->vm_mm->mmap_sem is held for writing.
+ * 1) Use shift to calculate the new vma endpoints.
+ * 2) Extend vma to cover both the old and new ranges. This ensures the
+ * arguments passed to subsequent functions are consistent.
+ * 3) Move vma's page tables to the new range.
+ * 4) Free up any cleared pgd range.
+ * 5) Shrink the vma to cover only the new range.
*/
-void install_arg_page(struct vm_area_struct *vma,
- struct page *page, unsigned long address)
+static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
struct mm_struct *mm = vma->vm_mm;
- pte_t * pte;
- spinlock_t *ptl;
+ unsigned long old_start = vma->vm_start;
+ unsigned long old_end = vma->vm_end;
+ unsigned long length = old_end - old_start;
+ unsigned long new_start = old_start - shift;
+ unsigned long new_end = old_end - shift;
+ struct mmu_gather *tlb;
- if (unlikely(anon_vma_prepare(vma)))
- goto out;
+ BUG_ON(new_start > new_end);
- flush_dcache_page(page);
- pte = get_locked_pte(mm, address, &ptl);
- if (!pte)
- goto out;
- if (!pte_none(*pte)) {
- pte_unmap_unlock(pte, ptl);
- goto out;
+ /*
+ * ensure there are no vmas between where we want to go
+ * and where we are
+ */
+ if (vma != find_vma(mm, new_start))
+ return -EFAULT;
+
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+ vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
+
+ /*
+ * move the page tables downwards; on failure we rely on
+ * process cleanup to remove whatever mess we made.
+ */
+ if (length != move_page_tables(vma, old_start,
+ vma, new_start, length))
+ return -ENOMEM;
+
+ lru_add_drain();
+ tlb = tlb_gather_mmu(mm, 0);
+ if (new_end > old_start) {
+ /*
+ * when the old and new regions overlap, clear from new_end.
+ */
+ free_pgd_range(&tlb, new_end, old_end, new_end,
+ vma->vm_next ? vma->vm_next->vm_start : 0);
+ } else {
+ /*
+ * otherwise, clean from old_start; this is done to avoid touching
+ * the address space in [new_end, old_start).  Some architectures
+ * have constraints on va-space that make this illegal (IA64); for
+ * the others it's just a little faster.
+ */
+ free_pgd_range(&tlb, old_start, old_end, new_end,
+ vma->vm_next ? vma->vm_next->vm_start : 0);
}
- inc_mm_counter(mm, anon_rss);
- lru_cache_add_active(page);
- set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
- page, vma->vm_page_prot))));
- page_add_new_anon_rmap(page, vma, address);
- pte_unmap_unlock(pte, ptl);
-
- /* no need for flush_tlb */
- return;
-out:
- __free_page(page);
- force_sig(SIGKILL, current);
+ tlb_finish_mmu(tlb, new_end, old_end);
+
+ /*
+ * shrink the vma to just the new range.
+ */
+ vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
+
+ return 0;
}
#define EXTRA_STACK_VM_PAGES 20 /* random */
+/*
+ * Finalizes the stack vm_area_struct. The flags and permissions are updated,
+ * the stack is optionally relocated, and some extra space is added.
+ */
int setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top,
int executable_stack)
{
- unsigned long stack_base;
- struct vm_area_struct *mpnt;
+ unsigned long ret;
+ unsigned long stack_shift;
struct mm_struct *mm = current->mm;
- int i, ret;
- long arg_size;
+ struct vm_area_struct *vma = bprm->vma;
+ struct vm_area_struct *prev = NULL;
+ unsigned long vm_flags;
+ unsigned long stack_base;
#ifdef CONFIG_STACK_GROWSUP
- /* Move the argument and environment strings to the bottom of the
- * stack space.
- */
- int offset, j;
- char *to, *from;
-
- /* Start by shifting all the pages down */
- i = 0;
- for (j = 0; j < MAX_ARG_PAGES; j++) {
- struct page *page = bprm->page[j];
- if (!page)
- continue;
- bprm->page[i++] = page;
- }
-
- /* Now move them within their pages */
- offset = bprm->p % PAGE_SIZE;
- to = kmap(bprm->page[0]);
- for (j = 1; j < i; j++) {
- memmove(to, to + offset, PAGE_SIZE - offset);
- from = kmap(bprm->page[j]);
- memcpy(to + PAGE_SIZE - offset, from, offset);
- kunmap(bprm->page[j - 1]);
- to = from;
- }
- memmove(to, to + offset, PAGE_SIZE - offset);
- kunmap(bprm->page[j - 1]);
-
/* Limit stack size to 1GB */
stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
if (stack_base > (1 << 30))
stack_base = 1 << 30;
- stack_base = PAGE_ALIGN(stack_top - stack_base);
- /* Adjust bprm->p to point to the end of the strings. */
- bprm->p = stack_base + PAGE_SIZE * i - offset;
+ /* Make sure we didn't let the argument array grow too large. */
+ if (vma->vm_end - vma->vm_start > stack_base)
+ return -ENOMEM;
- mm->arg_start = stack_base;
- arg_size = i << PAGE_SHIFT;
+ stack_base = PAGE_ALIGN(stack_top - stack_base);
- /* zero pages that were copied above */
- while (i < MAX_ARG_PAGES)
- bprm->page[i++] = NULL;
+ stack_shift = vma->vm_start - stack_base;
+ mm->arg_start = bprm->p - stack_shift;
+ bprm->p = vma->vm_end - stack_shift;
#else
- stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
- stack_base = PAGE_ALIGN(stack_base);
- bprm->p += stack_base;
+ stack_top = arch_align_stack(stack_top);
+ stack_top = PAGE_ALIGN(stack_top);
+ stack_shift = vma->vm_end - stack_top;
+
+ bprm->p -= stack_shift;
mm->arg_start = bprm->p;
- arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif
- arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
-
if (bprm->loader)
- bprm->loader += stack_base;
- bprm->exec += stack_base;
-
- mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
- if (!mpnt)
- return -ENOMEM;
+ bprm->loader -= stack_shift;
+ bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
- {
- mpnt->vm_mm = mm;
-#ifdef CONFIG_STACK_GROWSUP
- mpnt->vm_start = stack_base;
- mpnt->vm_end = stack_base + arg_size;
-#else
- mpnt->vm_end = stack_top;
- mpnt->vm_start = mpnt->vm_end - arg_size;
-#endif
- /* Adjust stack execute permissions; explicitly enable
- * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
- * and leave alone (arch default) otherwise. */
- if (unlikely(executable_stack == EXSTACK_ENABLE_X))
- mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
- else if (executable_stack == EXSTACK_DISABLE_X)
- mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
- else
- mpnt->vm_flags = VM_STACK_FLAGS;
- mpnt->vm_flags |= mm->def_flags;
- mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
- if ((ret = insert_vm_struct(mm, mpnt))) {
+ vm_flags = vma->vm_flags;
+
+ /*
+ * Adjust stack execute permissions; explicitly enable for
+ * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+ * (arch default) otherwise.
+ */
+ if (unlikely(executable_stack == EXSTACK_ENABLE_X))
+ vm_flags |= VM_EXEC;
+ else if (executable_stack == EXSTACK_DISABLE_X)
+ vm_flags &= ~VM_EXEC;
+ vm_flags |= mm->def_flags;
+
+ ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
+ vm_flags);
+ if (ret)
+ goto out_unlock;
+ BUG_ON(prev != vma);
+
+ /* Move stack pages down in memory. */
+ if (stack_shift) {
+ ret = shift_arg_pages(vma, stack_shift);
+ if (ret) {
up_write(&mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, mpnt);
return ret;
}
- mm->stack_vm = mm->total_vm = vma_pages(mpnt);
}
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- struct page *page = bprm->page[i];
- if (page) {
- bprm->page[i] = NULL;
- install_arg_page(mpnt, page, stack_base);
- }
- stack_base += PAGE_SIZE;
- }
+#ifdef CONFIG_STACK_GROWSUP
+ stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#else
+ stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#endif
+ ret = expand_stack(vma, stack_base);
+ if (ret)
+ ret = -EFAULT;
+
+out_unlock:
up_write(&mm->mmap_sem);
-
return 0;
}
-
EXPORT_SYMBOL(setup_arg_pages);
-#define free_arg_pages(bprm) do { } while (0)
-
-#else
-
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
- int i;
-
- for (i = 0; i < MAX_ARG_PAGES; i++) {
- if (bprm->page[i])
- __free_page(bprm->page[i]);
- bprm->page[i] = NULL;
- }
-}
-
#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
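setup_arg_pages() above now moves the entire temporary stack VMA with shift_arg_pages() instead of installing argument pages one by one. A small sketch of only the address arithmetic, showing how the relocated range follows from the shift and when it overlaps the old range, which is what decides whether free_pgd_range() clears from new_end or from old_start; the addresses are made up.

#include <stdio.h>

struct range { unsigned long start, end; };

/*
 * Compute the shifted-down range and report whether it overlaps the old
 * one, mirroring the decision shift_arg_pages() makes before calling
 * free_pgd_range(): on overlap clear [new_end, old_end), otherwise clear
 * [old_start, old_end).
 */
static struct range shift_down(struct range old, unsigned long shift,
			       int *overlaps)
{
	struct range new_r = { old.start - shift, old.end - shift };

	*overlaps = new_r.end > old.start;
	return new_r;
}

int main(void)
{
	/* an 8-page argument stack parked just below a made-up stack top */
	struct range old = { 0xbfff8000UL, 0xc0000000UL };
	struct range r;
	int overlaps;

	r = shift_down(old, 0x3000, &overlaps);		/* small shift */
	printf("shift 0x3000  -> [%#lx, %#lx) overlap=%d\n",
	       r.start, r.end, overlaps);

	r = shift_down(old, 0x10000, &overlaps);	/* large shift */
	printf("shift 0x10000 -> [%#lx, %#lx) overlap=%d\n",
	       r.start, r.end, overlaps);
	return 0;
}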
@@ -586,18 +779,10 @@ static int de_thread(struct task_struct *tsk)
int count;
/*
- * Tell all the sighand listeners that this sighand has
- * been detached. The signalfd_detach() function grabs the
- * sighand lock, if signal listeners are present on the sighand.
- */
- signalfd_detach(tsk);
-
- /*
* If we don't share sighandlers, then we aren't sharing anything
* and we can just re-use it all.
*/
if (atomic_read(&oldsighand->count) <= 1) {
- BUG_ON(atomic_read(&sig->count) != 1);
exit_itimers(sig);
return 0;
}
@@ -740,8 +925,6 @@ no_thread_group:
if (leader)
release_task(leader);
- BUG_ON(atomic_read(&sig->count) != 1);
-
if (atomic_read(&oldsighand->count) == 1) {
/*
* Now that we nuked the rest of the thread group,
@@ -864,9 +1047,9 @@ int flush_old_exec(struct linux_binprm * bprm)
current->sas_ss_sp = current->sas_ss_size = 0;
if (current->euid == current->uid && current->egid == current->gid)
- current->mm->dumpable = 1;
+ set_dumpable(current->mm, 1);
else
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
name = bprm->filename;
@@ -890,11 +1073,14 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
current->mm->task_size = TASK_SIZE;
- if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
- file_permission(bprm->file, MAY_READ) ||
- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
suid_keys(current);
- current->mm->dumpable = suid_dumpable;
+ set_dumpable(current->mm, suid_dumpable);
+ current->pdeath_signal = 0;
+ } else if (file_permission(bprm->file, MAY_READ) ||
+ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+ suid_keys(current);
+ set_dumpable(current->mm, suid_dumpable);
}
/* An exec changes our domain. We are no longer part of the thread
@@ -983,8 +1169,10 @@ void compute_creds(struct linux_binprm *bprm)
{
int unsafe;
- if (bprm->e_uid != current->uid)
+ if (bprm->e_uid != current->uid) {
suid_keys(current);
+ current->pdeath_signal = 0;
+ }
exec_keys(current);
task_lock(current);
@@ -1000,43 +1188,42 @@ EXPORT_SYMBOL(compute_creds);
* points to; chop off the first by relocating brpm->p to right after
* the first '\0' encountered.
*/
-void remove_arg_zero(struct linux_binprm *bprm)
+int remove_arg_zero(struct linux_binprm *bprm)
{
- if (bprm->argc) {
- char ch;
+ int ret = 0;
+ unsigned long offset;
+ char *kaddr;
+ struct page *page;
- do {
- unsigned long offset;
- unsigned long index;
- char *kaddr;
- struct page *page;
-
- offset = bprm->p & ~PAGE_MASK;
- index = bprm->p >> PAGE_SHIFT;
+ if (!bprm->argc)
+ return 0;
- page = bprm->page[index];
- kaddr = kmap_atomic(page, KM_USER0);
+ do {
+ offset = bprm->p & ~PAGE_MASK;
+ page = get_arg_page(bprm, bprm->p, 0);
+ if (!page) {
+ ret = -EFAULT;
+ goto out;
+ }
+ kaddr = kmap_atomic(page, KM_USER0);
- /* run through page until we reach end or find NUL */
- do {
- ch = *(kaddr + offset);
+ for (; offset < PAGE_SIZE && kaddr[offset];
+ offset++, bprm->p++)
+ ;
- /* discard that character... */
- bprm->p++;
- offset++;
- } while (offset < PAGE_SIZE && ch != '\0');
+ kunmap_atomic(kaddr, KM_USER0);
+ put_arg_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ if (offset == PAGE_SIZE)
+ free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
+ } while (offset == PAGE_SIZE);
- /* free the old page */
- if (offset == PAGE_SIZE) {
- __free_page(page);
- bprm->page[index] = NULL;
- }
- } while (ch != '\0');
+ bprm->p++;
+ bprm->argc--;
+ ret = 0;
- bprm->argc--;
- }
+out:
+ return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
@@ -1062,7 +1249,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
fput(bprm->file);
bprm->file = NULL;
- loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+ loader = bprm->vma->vm_end - sizeof(void *);
file = open_exec("/sbin/loader");
retval = PTR_ERR(file);
@@ -1154,8 +1341,8 @@ int do_execve(char * filename,
{
struct linux_binprm *bprm;
struct file *file;
+ unsigned long env_p;
int retval;
- int i;
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1169,25 +1356,19 @@ int do_execve(char * filename,
sched_exec();
- bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
-
bprm->file = file;
bprm->filename = filename;
bprm->interp = filename;
- bprm->mm = mm_alloc();
- retval = -ENOMEM;
- if (!bprm->mm)
- goto out_file;
- retval = init_new_context(current, bprm->mm);
- if (retval < 0)
- goto out_mm;
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
- bprm->argc = count(argv, bprm->p / sizeof(void *));
+ bprm->argc = count(argv, MAX_ARG_STRINGS);
if ((retval = bprm->argc) < 0)
goto out_mm;
- bprm->envc = count(envp, bprm->p / sizeof(void *));
+ bprm->envc = count(envp, MAX_ARG_STRINGS);
if ((retval = bprm->envc) < 0)
goto out_mm;
@@ -1208,15 +1389,16 @@ int do_execve(char * filename,
if (retval < 0)
goto out;
+ env_p = bprm->p;
retval = copy_strings(bprm->argc, argv, bprm);
if (retval < 0)
goto out;
+ bprm->argv_len = env_p - bprm->p;
retval = search_binary_handler(bprm,regs);
if (retval >= 0) {
- free_arg_pages(bprm);
-
/* execve success */
+ free_arg_pages(bprm);
security_bprm_free(bprm);
acct_update_integrals(current);
kfree(bprm);
@@ -1224,26 +1406,19 @@ int do_execve(char * filename,
}
out:
- /* Something went wrong, return the inode and free the argument pages*/
- for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
- struct page * page = bprm->page[i];
- if (page)
- __free_page(page);
- }
-
+ free_arg_pages(bprm);
if (bprm->security)
security_bprm_free(bprm);
out_mm:
if (bprm->mm)
- mmdrop(bprm->mm);
+ mmput (bprm->mm);
out_file:
if (bprm->file) {
allow_write_access(bprm->file);
fput(bprm->file);
}
-
out_kfree:
kfree(bprm);
@@ -1484,6 +1659,56 @@ fail:
return core_waiters;
}
+/*
+ * set_dumpable converts the traditional three-value dumpable to two flags and
+ * stores them into mm->flags.  It modifies the lower two bits of mm->flags, but
+ * these bits are not changed atomically, so get_dumpable can observe an
+ * intermediate state.  To avoid unexpected behavior, get_dumpable is made to
+ * return either the old dumpable value or the new one by paying attention to
+ * the order in which the bits are modified.
+ *
+ *  dumpable |   mm->flags (binary)
+ *   old new | initial  interim  final
+ *  ---------+-----------------------
+ *    0   1  |   00       01      01
+ *    0   2  |   00       10(*)   11
+ *    1   0  |   01       00      00
+ *    1   2  |   01       11      11
+ *    2   0  |   11       10(*)   00
+ *    2   1  |   11       11      01
+ *
+ * (*) get_dumpable regards interim value of 10 as 11.
+ */
+void set_dumpable(struct mm_struct *mm, int value)
+{
+ switch (value) {
+ case 0:
+ clear_bit(MMF_DUMPABLE, &mm->flags);
+ smp_wmb();
+ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+ break;
+ case 1:
+ set_bit(MMF_DUMPABLE, &mm->flags);
+ smp_wmb();
+ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+ break;
+ case 2:
+ set_bit(MMF_DUMP_SECURELY, &mm->flags);
+ smp_wmb();
+ set_bit(MMF_DUMPABLE, &mm->flags);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(set_dumpable);
+
+int get_dumpable(struct mm_struct *mm)
+{
+ int ret;
+
+ ret = mm->flags & 0x3;
+ return (ret >= 2) ? 2 : ret;
+}
+
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
char corename[CORENAME_MAX_SIZE + 1];
@@ -1502,7 +1727,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
if (!binfmt || !binfmt->core_dump)
goto fail;
down_write(&mm->mmap_sem);
- if (!mm->dumpable) {
+ if (!get_dumpable(mm)) {
up_write(&mm->mmap_sem);
goto fail;
}
@@ -1512,11 +1737,11 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
* process nor do we know its entire history. We only know it
* was tainted so we dump it as root in mode 2.
*/
- if (mm->dumpable == 2) { /* Setuid core dump mode */
+ if (get_dumpable(mm) == 2) { /* Setuid core dump mode */
flag = O_EXCL; /* Stop rewrite attacks */
current->fsuid = 0; /* Dump root private */
}
- mm->dumpable = 0;
+ set_dumpable(mm, 0);
retval = coredump_wait(exit_code);
if (retval < 0)
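The fs/exec.c hunks above also replace the mm->dumpable field with two bits in mm->flags; the table in the set_dumpable() comment describes which intermediate states a concurrent reader can observe. The following standalone sketch reproduces the encoding with plain bit operations, assuming MMF_DUMPABLE and MMF_DUMP_SECURELY are bits 0 and 1 (those constants come from the header side of this series, which is not part of this diff).

#include <stdio.h>

#define MMF_DUMPABLE	  0	/* assumed bit numbers; the real ones are */
#define MMF_DUMP_SECURELY 1	/* defined alongside mm->flags            */

/* Same encoding as the new kernel helpers, minus atomics and barriers. */
static void set_dumpable(unsigned long *flags, int value)
{
	switch (value) {
	case 0:
		*flags &= ~(1UL << MMF_DUMPABLE);
		*flags &= ~(1UL << MMF_DUMP_SECURELY);
		break;
	case 1:
		*flags |= 1UL << MMF_DUMPABLE;
		*flags &= ~(1UL << MMF_DUMP_SECURELY);
		break;
	case 2:
		*flags |= 1UL << MMF_DUMP_SECURELY;
		*flags |= 1UL << MMF_DUMPABLE;
		break;
	}
}

static int get_dumpable(unsigned long flags)
{
	int ret = flags & 0x3;

	/* the interim value 10 is read as 2, per the (*) note in the table */
	return ret >= 2 ? 2 : ret;
}

int main(void)
{
	unsigned long flags = 0;
	int v;

	for (v = 0; v <= 2; v++) {
		set_dumpable(&flags, v);
		printf("set_dumpable(%d): bits %lu%lu -> get_dumpable() = %d\n",
		       v, (flags >> MMF_DUMP_SECURELY) & 1,
		       (flags >> MMF_DUMPABLE) & 1, get_dumpable(flags));
	}
	/* interim state of a 0 -> 2 transition: only the secure bit is set */
	printf("interim '10' reads as %d\n",
	       get_dumpable(1UL << MMF_DUMP_SECURELY));
	return 0;
}

Because of the write ordering (plus smp_wmb() in the kernel version), a racing get_dumpable() only ever reports the old setting or the new one.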
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3eefa97..639a32c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -167,14 +167,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
#endif
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
sizeof(struct ext2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext2_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -580,7 +580,7 @@ static int ext2_check_descriptors (struct super_block * sb)
return 0;
}
if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
last_block)
{
ext2_error (sb, "ext2_check_descriptors",
@@ -883,13 +883,11 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
bgl_lock_init(&sbi->s_blockgroup_lock);
- sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
- GFP_KERNEL);
+ sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
printk ("EXT2-fs: not enough memory\n");
goto failed_mount_group_desc;
}
- memset(sbi->s_debts, 0, sbi->s_groups_count * sizeof(*sbi->s_debts));
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
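The descriptor check above (and the identical ext3 change further down) is an off-by-one fix: an inode table of s_itb_per_group blocks starting at bg_inode_table occupies blocks up to bg_inode_table + s_itb_per_group - 1, so a table ending exactly on the group's last block is valid and used to be rejected. A tiny sketch with made-up block numbers:

#include <stdio.h>
#include <stdbool.h>

/* Does an inode table of 'itb_per_group' blocks starting at 'table' fit
 * inside [first_block, last_block]?  The corrected comparison bounds the
 * table's last block, table + itb_per_group - 1. */
static bool inode_table_ok(unsigned long table, unsigned long itb_per_group,
			   unsigned long first_block, unsigned long last_block)
{
	return table >= first_block &&
	       table + itb_per_group - 1 <= last_block;
}

int main(void)
{
	/* group spans blocks [32768, 65535], inode table is 512 blocks */
	unsigned long first = 32768, last = 65535, itb = 512;

	/* a table at 65024 ends exactly on block 65535: valid now, but
	 * spuriously rejected by the old '+ itb_per_group >' comparison */
	printf("table at 65024: %s\n",
	       inode_table_ok(65024, itb, first, last) ? "ok" : "bad");
	/* a table at 65025 really does spill one block past the group */
	printf("table at 65025: %s\n",
	       inode_table_ok(65025, itb, first, last) ? "ok" : "bad");
	return 0;
}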
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 8528698..c00723a 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -136,12 +136,14 @@ static int ext3_readdir(struct file * filp,
err = ext3_get_blocks_handle(NULL, inode, blk, 1,
&map_bh, 0, 0);
if (err > 0) {
- page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
- &filp->f_ra,
- filp,
- map_bh.b_blocknr >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits),
- 1);
+ pgoff_t index = map_bh.b_blocknr >>
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ if (!ra_has_index(&filp->f_ra, index))
+ page_cache_sync_readahead(
+ sb->s_bdev->bd_inode->i_mapping,
+ &filp->f_ra, filp,
+ index, 1);
+ filp->f_ra.prev_index = index;
bh = ext3_bread(NULL, inode, blk, 0, &err);
}
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1586807..c1fa190 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -140,7 +140,8 @@ struct dx_frame
struct dx_map_entry
{
u32 hash;
- u32 offs;
+ u16 offs;
+ u16 size;
};
#ifdef CONFIG_EXT3_INDEX
@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
entries = (struct dx_entry *) (((char *)&root->info) +
root->info.info_length);
- assert(dx_get_limit(entries) == dx_root_limit(dir,
- root->info.info_length));
+
+ if (dx_get_limit(entries) != dx_root_limit(dir,
+ root->info.info_length)) {
+ ext3_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: limit != root limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+ }
+
dxtrace (printk("Look up %x", hash));
while (1)
{
count = dx_get_count(entries);
- assert (count && count <= dx_get_limit(entries));
+ if (!count || count > dx_get_limit(entries)) {
+ ext3_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: no count or count > limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail2;
+ }
+
p = entries + 1;
q = entries + count - 1;
while (p <= q)
@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
- assert (dx_get_limit(entries) == dx_node_limit (dir));
+ if (dx_get_limit(entries) != dx_node_limit (dir)) {
+ ext3_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: limit != node limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail2;
+ }
frame++;
+ frame->bh = NULL;
}
fail2:
while (frame >= frame_in) {
@@ -432,6 +455,10 @@ fail2:
frame--;
}
fail:
+ if (*err == ERR_BAD_DX_DIR)
+ ext3_warning(dir->i_sb, __FUNCTION__,
+ "Corrupt dir inode %ld, running e2fsck is "
+ "recommended.", dir->i_ino);
return NULL;
}
@@ -671,6 +698,10 @@ errout:
* Directory block splitting, compacting
*/
+/*
+ * Create map of hash values, offsets, and sizes, stored at end of block.
+ * Returns number of entries mapped.
+ */
static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
{
@@ -684,7 +715,8 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
ext3fs_dirhash(de->name, de->name_len, &h);
map_tail--;
map_tail->hash = h.hash;
- map_tail->offs = (u32) ((char *) de - base);
+ map_tail->offs = (u16) ((char *) de - base);
+ map_tail->size = le16_to_cpu(de->rec_len);
count++;
cond_resched();
}
@@ -694,6 +726,7 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
return count;
}
+/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
struct dx_map_entry *p, *q, *top = map + count - 1;
@@ -1091,6 +1124,10 @@ static inline void ext3_set_de_type(struct super_block *sb,
}
#ifdef CONFIG_EXT3_INDEX
+/*
+ * Move count entries from end of map between two memory locations.
+ * Returns pointer to last entry moved.
+ */
static struct ext3_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
{
@@ -1109,6 +1146,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
return (struct ext3_dir_entry_2 *) (to - rec_len);
}
+/*
+ * Compact each dir entry in the range to the minimal rec_len.
+ * Returns pointer to last entry in range.
+ */
static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
{
struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
@@ -1131,6 +1172,11 @@ static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
return prev;
}
+/*
+ * Split a full leaf block to make room for a new dir entry.
+ * Allocate a new block, and move entries so that they are approx. equally full.
+ * Returns pointer to de in block into which the new entry will be inserted.
+ */
static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct buffer_head **bh,struct dx_frame *frame,
struct dx_hash_info *hinfo, int *error)
@@ -1142,7 +1188,7 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
u32 hash2;
struct dx_map_entry *map;
char *data1 = (*bh)->b_data, *data2;
- unsigned split;
+ unsigned split, move, size, i;
struct ext3_dir_entry_2 *de = NULL, *de2;
int err = 0;
@@ -1170,8 +1216,19 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
- split = count/2; // need to adjust to actual middle
dx_sort_map (map, count);
+ /* Split the existing block in the middle, size-wise */
+ size = 0;
+ move = 0;
+ for (i = count-1; i >= 0; i--) {
+ /* is more than half of this entry in 2nd half of the block? */
+ if (size + map[i].size/2 > blocksize/2)
+ break;
+ size += map[i].size;
+ move++;
+ }
+ /* map index at which we will split */
+ split = count - move;
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk("Split block %i at %x, %i/%i\n",
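The do_split() change above stops cutting the sorted map at count/2 entries; with the new size field in struct dx_map_entry it instead walks the map from the tail, accumulating record lengths until roughly half the block is accounted for, so both halves end up with comparable free space even when entry sizes vary widely. A standalone sketch of that selection loop over made-up entries:

#include <stdio.h>

struct map_entry {
	unsigned hash;
	unsigned short offs;
	unsigned short size;	/* rec_len of the directory entry */
};

/* Return the map index at which to split so the two halves are roughly
 * balanced by bytes, mirroring the loop added to do_split(). */
static unsigned pick_split(const struct map_entry *map, unsigned count,
			   unsigned blocksize)
{
	unsigned size = 0, move = 0;
	int i;

	for (i = count - 1; i >= 0; i--) {
		/* is more than half of this entry in the 2nd half? */
		if (size + map[i].size / 2 > blocksize / 2)
			break;
		size += map[i].size;
		move++;
	}
	return count - move;
}

int main(void)
{
	/* one short name and a few long ones; hash/offs values are dummies */
	struct map_entry map[] = {
		{ 0x10, 0,   16 }, { 0x20, 16,  200 }, { 0x30, 216, 220 },
		{ 0x40, 436, 250 }, { 0x50, 686, 250 },
	};
	unsigned count = sizeof(map) / sizeof(map[0]);

	printf("count/2 split: %u, size-based split: %u\n",
	       count / 2, pick_split(map, count, 1024));
	return 0;
}

With these sizes an entry-count split would leave most of the bytes in one half; the size-based split moves the boundary by one entry so each half carries close to blocksize/2.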
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4f84dc8..9537316 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -490,7 +490,7 @@ static int init_inodecache(void)
sizeof(struct ext3_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext3_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -1221,7 +1221,7 @@ static int ext3_check_descriptors (struct super_block * sb)
return 0;
}
if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
last_block)
{
ext3_error (sb, "ext3_check_descriptors",
@@ -2578,8 +2578,11 @@ static int ext3_release_dquot(struct dquot *dquot)
handle = ext3_journal_start(dquot_to_inode(dquot),
EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ /* Release dquot anyway to avoid endless cycle in dqput() */
+ dquot_release(dquot);
return PTR_ERR(handle);
+ }
ret = dquot_release(dquot);
err = ext3_journal_stop(handle);
if (!ret)
@@ -2712,6 +2715,12 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
+ if (!handle) {
+ printk(KERN_WARNING "EXT3-fs: Quota write (off=%Lu, len=%Lu)"
+ " cancelled because transaction is not started.\n",
+ (unsigned long long)off, (unsigned long long)len);
+ return -EIO;
+ }
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 9de54ae..e53b4af 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -517,7 +517,7 @@ do_more:
/*
* An HJ special. This is expensive...
*/
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
jbd_unlock_bh_state(bitmap_bh);
{
struct buffer_head *debug_bh;
@@ -1597,7 +1597,7 @@ allocated:
performed_allocation = 1;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
{
struct buffer_head *debug_bh;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e8ad06e..3ab01c0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -135,12 +135,14 @@ static int ext4_readdir(struct file * filp,
map_bh.b_state = 0;
err = ext4_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
if (err > 0) {
- page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
- &filp->f_ra,
- filp,
- map_bh.b_blocknr >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits),
- 1);
+ pgoff_t index = map_bh.b_blocknr >>
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ if (!ra_has_index(&filp->f_ra, index))
+ page_cache_sync_readahead(
+ sb->s_bdev->bd_inode->i_mapping,
+ &filp->f_ra, filp,
+ index, 1);
+ filp->f_ra.prev_index = index;
bh = ext4_bread(NULL, inode, blk, 0, &err);
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index b9ce2412..78beb09 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -39,6 +39,7 @@
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/falloc.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
@@ -91,36 +92,6 @@ static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
-static int ext4_ext_check_header(const char *function, struct inode *inode,
- struct ext4_extent_header *eh)
-{
- const char *error_msg = NULL;
-
- if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
- error_msg = "invalid magic";
- goto corrupted;
- }
- if (unlikely(eh->eh_max == 0)) {
- error_msg = "invalid eh_max";
- goto corrupted;
- }
- if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
- error_msg = "invalid eh_entries";
- goto corrupted;
- }
- return 0;
-
-corrupted:
- ext4_error(inode->i_sb, function,
- "bad header in inode #%lu: %s - magic %x, "
- "entries %u, max %u, depth %u",
- inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
- le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
- le16_to_cpu(eh->eh_depth));
-
- return -EIO;
-}
-
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
int err;
@@ -269,6 +240,70 @@ static int ext4_ext_space_root_idx(struct inode *inode)
return size;
}
+static int
+ext4_ext_max_entries(struct inode *inode, int depth)
+{
+ int max;
+
+ if (depth == ext_depth(inode)) {
+ if (depth == 0)
+ max = ext4_ext_space_root(inode);
+ else
+ max = ext4_ext_space_root_idx(inode);
+ } else {
+ if (depth == 0)
+ max = ext4_ext_space_block(inode);
+ else
+ max = ext4_ext_space_block_idx(inode);
+ }
+
+ return max;
+}
+
+static int __ext4_ext_check_header(const char *function, struct inode *inode,
+ struct ext4_extent_header *eh,
+ int depth)
+{
+ const char *error_msg;
+ int max = 0;
+
+ if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
+ error_msg = "invalid magic";
+ goto corrupted;
+ }
+ if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
+ error_msg = "unexpected eh_depth";
+ goto corrupted;
+ }
+ if (unlikely(eh->eh_max == 0)) {
+ error_msg = "invalid eh_max";
+ goto corrupted;
+ }
+ max = ext4_ext_max_entries(inode, depth);
+ if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
+ error_msg = "too large eh_max";
+ goto corrupted;
+ }
+ if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
+ return 0;
+
+corrupted:
+ ext4_error(inode->i_sb, function,
+ "bad header in inode #%lu: %s - magic %x, "
+ "entries %u, max %u(%u), depth %u(%u)",
+ inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+ max, le16_to_cpu(eh->eh_depth), depth);
+
+ return -EIO;
+}
+
+#define ext4_ext_check_header(inode, eh, depth) \
+ __ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
+
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
@@ -282,7 +317,7 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
} else if (path->p_ext) {
ext_debug(" %d:%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
- le16_to_cpu(path->p_ext->ee_len),
+ ext4_ext_get_actual_len(path->p_ext),
ext_pblock(path->p_ext));
} else
ext_debug(" []");
@@ -305,7 +340,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
- le16_to_cpu(ex->ee_len), ext_pblock(ex));
+ ext4_ext_get_actual_len(ex), ext_pblock(ex));
}
ext_debug("\n");
}
@@ -329,6 +364,7 @@ static void ext4_ext_drop_refs(struct ext4_ext_path *path)
/*
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
+ * the header must be checked before calling this
*/
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -336,27 +372,25 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent_idx *r, *l, *m;
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
- BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
ext_debug("binsearch for %d(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
- r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
+ r = EXT_LAST_INDEX(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ei_block))
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
- m, m->ei_block, r, r->ei_block);
+ ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
+ m, le32_to_cpu(m->ei_block),
+ r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
- idx_block(path->p_idx));
+ idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
{
@@ -388,6 +422,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
/*
* ext4_ext_binsearch:
* binary search for closest extent of the given block
+ * the header must be checked before calling this
*/
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -395,9 +430,6 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
-
if (eh->eh_entries == 0) {
/*
* this leaf is empty:
@@ -409,7 +441,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
ext_debug("binsearch for %d: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
- r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
+ r = EXT_LAST_EXTENT(eh);
while (l <= r) {
m = l + (r - l) / 2;
@@ -417,15 +449,16 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
- m, m->ee_block, r, r->ee_block);
+ ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
+ m, le32_to_cpu(m->ee_block),
+ r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:%d ",
le32_to_cpu(path->p_ext->ee_block),
ext_pblock(path->p_ext),
- le16_to_cpu(path->p_ext->ee_len));
+ ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
{
@@ -468,11 +501,10 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
short int depth, i, ppos = 0, alloc = 0;
eh = ext_inode_hdr(inode);
- BUG_ON(eh == NULL);
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+ depth = ext_depth(inode);
+ if (ext4_ext_check_header(inode, eh, depth))
return ERR_PTR(-EIO);
- i = depth = ext_depth(inode);
/* account possible depth increase */
if (!path) {
@@ -484,10 +516,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
}
path[0].p_hdr = eh;
+ i = depth;
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
@@ -504,7 +538,7 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
path[ppos].p_hdr = eh;
i--;
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+ if (ext4_ext_check_header(inode, eh, i))
goto err;
}
@@ -513,9 +547,6 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
- goto err;
-
/* find extent */
ext4_ext_binsearch(inode, path + ppos, block);
@@ -553,7 +584,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
len = (len - 1) * sizeof(struct ext4_extent_idx);
len = len < 0 ? 0 : len;
- ext_debug("insert new index %d after: %d. "
+ ext_debug("insert new index %d after: %llu. "
"move %d from 0x%p to 0x%p\n",
logical, ptr, len,
(curp->p_idx + 1), (curp->p_idx + 2));
@@ -564,7 +595,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
/* insert before */
len = len * sizeof(struct ext4_extent_idx);
len = len < 0 ? 0 : len;
- ext_debug("insert new index %d before: %d. "
+ ext_debug("insert new index %d before: %llu. "
"move %d from 0x%p to 0x%p\n",
logical, ptr, len,
curp->p_idx, (curp->p_idx + 1));
@@ -686,7 +717,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ext_debug("move %d:%llu:%d in new leaf %llu\n",
le32_to_cpu(path[depth].p_ext->ee_block),
ext_pblock(path[depth].p_ext),
- le16_to_cpu(path[depth].p_ext->ee_len),
+ ext4_ext_get_actual_len(path[depth].p_ext),
newblock);
/*memmove(ex++, path[depth].p_ext++,
sizeof(struct ext4_extent));
@@ -764,7 +795,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr));
while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
- ext_debug("%d: move %d:%d in new index %llu\n", i,
+ ext_debug("%d: move %d:%llu in new index %llu\n", i,
le32_to_cpu(path[i].p_idx->ei_block),
idx_pblock(path[i].p_idx),
newblock);
@@ -893,8 +924,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
curp->p_hdr->eh_entries = cpu_to_le16(1);
curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
- /* FIXME: it works, but actually path[0] can be index */
- curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
+
+ if (path[0].p_hdr->eh_depth)
+ curp->p_idx->ei_block =
+ EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
+ else
+ curp->p_idx->ei_block =
+ EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
ext4_idx_store_pblock(curp->p_idx, newblock);
neh = ext_inode_hdr(inode);
@@ -1106,7 +1142,24 @@ static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
struct ext4_extent *ex2)
{
- if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
+ unsigned short ext1_ee_len, ext2_ee_len, max_len;
+
+ /*
+ * Make sure that either both extents are uninitialized, or
+ * both are _not_.
+ */
+ if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+ return 0;
+
+ if (ext4_ext_is_uninitialized(ex1))
+ max_len = EXT_UNINIT_MAX_LEN;
+ else
+ max_len = EXT_INIT_MAX_LEN;
+
+ ext1_ee_len = ext4_ext_get_actual_len(ex1);
+ ext2_ee_len = ext4_ext_get_actual_len(ex2);
+
+ if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
le32_to_cpu(ex2->ee_block))
return 0;
@@ -1115,19 +1168,66 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
* as an RO_COMPAT feature, refuse to merge to extents if
* this can result in the top bit of ee_len being set.
*/
- if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
+ if (ext1_ee_len + ext2_ee_len > max_len)
return 0;
#ifdef AGGRESSIVE_TEST
if (le16_to_cpu(ex1->ee_len) >= 4)
return 0;
#endif
- if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
+ if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
return 1;
return 0;
}
/*
+ * This function tries to merge the "ex" extent to the next extent in the tree.
+ * It always tries to merge towards right. If you want to merge towards
+ * left, pass "ex - 1" as argument instead of "ex".
+ * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
+ * 1 if they got merged.
+ */
+int ext4_ext_try_to_merge(struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *ex)
+{
+ struct ext4_extent_header *eh;
+ unsigned int depth, len;
+ int merge_done = 0;
+ int uninitialized = 0;
+
+ depth = ext_depth(inode);
+ BUG_ON(path[depth].p_hdr == NULL);
+ eh = path[depth].p_hdr;
+
+ while (ex < EXT_LAST_EXTENT(eh)) {
+ if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
+ break;
+ /* merge with next extent! */
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(ex + 1));
+ if (uninitialized)
+ ext4_ext_mark_uninitialized(ex);
+
+ if (ex + 1 < EXT_LAST_EXTENT(eh)) {
+ len = (EXT_LAST_EXTENT(eh) - ex - 1)
+ * sizeof(struct ext4_extent);
+ memmove(ex + 1, ex + 2, len);
+ }
+ eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
+ merge_done = 1;
+ WARN_ON(eh->eh_entries == 0);
+ if (!eh->eh_entries)
+ ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
+ "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
+ }
+
+ return merge_done;
+}
+
+/*
* check if a portion of the "newext" extent overlaps with an
* existing extent.
*
@@ -1144,7 +1244,7 @@ unsigned int ext4_ext_check_overlap(struct inode *inode,
unsigned int ret = 0;
b1 = le32_to_cpu(newext->ee_block);
- len1 = le16_to_cpu(newext->ee_len);
+ len1 = ext4_ext_get_actual_len(newext);
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
@@ -1191,8 +1291,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_extent *nearex; /* nearest extent */
struct ext4_ext_path *npath = NULL;
int depth, len, err, next;
+ unsigned uninitialized = 0;
- BUG_ON(newext->ee_len == 0);
+ BUG_ON(ext4_ext_get_actual_len(newext) == 0);
depth = ext_depth(inode);
ex = path[depth].p_ext;
BUG_ON(path[depth].p_hdr == NULL);
@@ -1200,14 +1301,24 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
/* try to insert block into found extent and return */
if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append %d block to %d:%d (from %llu)\n",
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
- le16_to_cpu(ex->ee_len), ext_pblock(ex));
+ ext4_ext_get_actual_len(ex), ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
return err;
- ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
- + le16_to_cpu(newext->ee_len));
+
+ /*
+ * ext4_can_extents_be_merged should have checked that either
+ * both extents are uninitialized, or both aren't. Thus we
+ * need to check only one of them here.
+ */
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(newext));
+ if (uninitialized)
+ ext4_ext_mark_uninitialized(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
@@ -1263,7 +1374,7 @@ has_space:
ext_debug("first extent in the leaf: %d:%llu:%d\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len));
+ ext4_ext_get_actual_len(newext));
path[depth].p_ext = EXT_FIRST_EXTENT(eh);
} else if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
@@ -1276,7 +1387,7 @@ has_space:
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
memmove(nearex + 2, nearex + 1, len);
}
@@ -1289,7 +1400,7 @@ has_space:
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
memmove(nearex + 1, nearex, len);
path[depth].p_ext = nearex;
@@ -1304,20 +1415,7 @@ has_space:
merge:
/* try to merge extents to the right */
- while (nearex < EXT_LAST_EXTENT(eh)) {
- if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
- break;
- /* merge with next extent! */
- nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
- + le16_to_cpu(nearex[1].ee_len));
- if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
- len = (EXT_LAST_EXTENT(eh) - nearex - 1)
- * sizeof(struct ext4_extent);
- memmove(nearex + 1, nearex + 2, len);
- }
- eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
- BUG_ON(eh->eh_entries == 0);
- }
+ ext4_ext_try_to_merge(inode, path, nearex);
/* try to merge extents to the left */
@@ -1379,8 +1477,8 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
- } else if (block >=
- le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
+ } else if (block >= le32_to_cpu(ex->ee_block)
+ + ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
@@ -1392,7 +1490,8 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
* by found extent
*/
start = block;
- end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
+ end = le32_to_cpu(ex->ee_block)
+ + ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
@@ -1408,7 +1507,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
cbex.ec_type = EXT4_EXT_CACHE_GAP;
} else {
cbex.ec_block = le32_to_cpu(ex->ee_block);
- cbex.ec_len = le16_to_cpu(ex->ee_len);
+ cbex.ec_len = ext4_ext_get_actual_len(ex);
cbex.ec_start = ext_pblock(ex);
cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
}
@@ -1445,7 +1544,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
- __u32 len, __u32 start, int type)
+ __u32 len, ext4_fsblk_t start, int type)
{
struct ext4_ext_cache *cex;
BUG_ON(len == 0);
@@ -1481,15 +1580,15 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext_debug("cache gap(before): %lu [%lu:%lu]",
(unsigned long) block,
(unsigned long) le32_to_cpu(ex->ee_block),
- (unsigned long) le16_to_cpu(ex->ee_len));
+ (unsigned long) ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
- + le16_to_cpu(ex->ee_len)) {
+ + ext4_ext_get_actual_len(ex)) {
lblock = le32_to_cpu(ex->ee_block)
- + le16_to_cpu(ex->ee_len);
+ + ext4_ext_get_actual_len(ex);
len = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%lu:%lu] %lu",
(unsigned long) le32_to_cpu(ex->ee_block),
- (unsigned long) le16_to_cpu(ex->ee_len),
+ (unsigned long) ext4_ext_get_actual_len(ex),
(unsigned long) block);
BUG_ON(len == lblock);
len = len - lblock;
@@ -1619,12 +1718,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
unsigned long from, unsigned long to)
{
struct buffer_head *bh;
+ unsigned short ee_len = ext4_ext_get_actual_len(ex);
int i;
#ifdef EXTENTS_STATS
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned short ee_len = le16_to_cpu(ex->ee_len);
spin_lock(&sbi->s_ext_stats_lock);
sbi->s_ext_blocks += ee_len;
sbi->s_ext_extents++;
@@ -1638,12 +1737,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
}
#endif
if (from >= le32_to_cpu(ex->ee_block)
- && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+ && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
unsigned long num;
ext4_fsblk_t start;
- num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
- start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
+ num = le32_to_cpu(ex->ee_block) + ee_len - from;
+ start = ext_pblock(ex) + ee_len - num;
ext_debug("free last %lu blocks starting %llu\n", num, start);
for (i = 0; i < num; i++) {
bh = sb_find_get_block(inode->i_sb, start + i);
@@ -1651,12 +1750,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
}
ext4_free_blocks(handle, inode, start, num);
} else if (from == le32_to_cpu(ex->ee_block)
- && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+ && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
} else {
printk("strange request: removal(2) %lu-%lu from %u:%u\n",
- from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
}
return 0;
}
@@ -1671,21 +1770,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
unsigned a, b, block, num;
unsigned long ex_ee_block;
unsigned short ex_ee_len;
+ unsigned uninitialized = 0;
struct ext4_extent *ex;
+ /* the header must be checked already in ext4_ext_remove_space() */
ext_debug("truncate since %lu in leaf\n", start);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
BUG_ON(eh == NULL);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
/* find where to start removing */
ex = EXT_LAST_EXTENT(eh);
ex_ee_block = le32_to_cpu(ex->ee_block);
- ex_ee_len = le16_to_cpu(ex->ee_len);
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex_ee_len = ext4_ext_get_actual_len(ex);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
@@ -1753,6 +1854,12 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex->ee_block = cpu_to_le32(block);
ex->ee_len = cpu_to_le16(num);
+ /*
+ * Do not mark uninitialized if all the blocks in the
+ * extent have been removed.
+ */
+ if (uninitialized && num)
+ ext4_ext_mark_uninitialized(ex);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
@@ -1762,7 +1869,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
- ex_ee_len = le16_to_cpu(ex->ee_len);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
}
if (correct_index && eh->eh_entries)
@@ -1825,7 +1932,7 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
return -ENOMEM;
}
path[0].p_hdr = ext_inode_hdr(inode);
- if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
+ if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
err = -EIO;
goto out;
}
@@ -1846,17 +1953,8 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
if (!path[i].p_hdr) {
ext_debug("initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
- if (ext4_ext_check_header(__FUNCTION__, inode,
- path[i].p_hdr)) {
- err = -EIO;
- goto out;
- }
}
- BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
- > le16_to_cpu(path[i].p_hdr->eh_max));
- BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
-
if (!path[i].p_idx) {
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
@@ -1873,17 +1971,27 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
+ struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
i + 1, idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
- path[i+1].p_bh =
- sb_bread(sb, idx_pblock(path[i].p_idx));
- if (!path[i+1].p_bh) {
+ bh = sb_bread(sb, idx_pblock(path[i].p_idx));
+ if (!bh) {
/* should we reset i_size? */
err = -EIO;
break;
}
+ if (WARN_ON(i + 1 > depth)) {
+ err = -EIO;
+ break;
+ }
+ if (ext4_ext_check_header(inode, ext_block_hdr(bh),
+ depth - i - 1)) {
+ err = -EIO;
+ break;
+ }
+ path[i + 1].p_bh = bh;
/* save actual number of indexes since this
* number is changed at the next iteration */
@@ -1977,15 +2085,158 @@ void ext4_ext_release(struct super_block *sb)
#endif
}
+/*
+ * This function is called by ext4_ext_get_blocks() if someone tries to write
+ * to an uninitialized extent. It may result in splitting the uninitialized
+ * extent into multiple extents (up to three: one initialized and two
+ * uninitialized).
+ * There are three possibilities:
+ * a> There is no split required: Entire extent should be initialized
+ * b> Splits in two extents: Write is happening at either end of the extent
+ * c> Splits in three extents: Someone is writing in the middle of the extent
+ */
+int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_fsblk_t iblock,
+ unsigned long max_blocks)
+{
+ struct ext4_extent *ex, newex;
+ struct ext4_extent *ex1 = NULL;
+ struct ext4_extent *ex2 = NULL;
+ struct ext4_extent *ex3 = NULL;
+ struct ext4_extent_header *eh;
+ unsigned int allocated, ee_block, ee_len, depth;
+ ext4_fsblk_t newblock;
+ int err = 0;
+ int ret = 0;
+
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+ allocated = ee_len - (iblock - ee_block);
+ newblock = iblock - ee_block + ext_pblock(ex);
+ ex2 = ex;
+
+ /* ex1: ee_block to iblock - 1 : uninitialized */
+ if (iblock > ee_block) {
+ ex1 = ex;
+ ex1->ee_len = cpu_to_le16(iblock - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ex2 = &newex;
+ }
+ /*
+ * for sanity, update the length of the ex2 extent before
+ * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ * overlap of blocks.
+ */
+ if (!ex1 && allocated > max_blocks)
+ ex2->ee_len = cpu_to_le16(max_blocks);
+ /* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
+ if (allocated > max_blocks) {
+ unsigned int newdepth;
+ ex3 = &newex;
+ ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+ ext4_ext_store_pblock(ex3, newblock + max_blocks);
+ ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+ ext4_ext_mark_uninitialized(ex3);
+ err = ext4_ext_insert_extent(handle, inode, path, ex3);
+ if (err)
+ goto out;
+ /*
+ * The depth, and hence eh & ex might change
+ * as part of the insert above.
+ */
+ newdepth = ext_depth(inode);
+ if (newdepth != depth) {
+ depth = newdepth;
+ path = ext4_ext_find_extent(inode, iblock, NULL);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ if (ex2 != &newex)
+ ex2 = ex;
+ }
+ allocated = max_blocks;
+ }
+ /*
+ * If there was a change of depth as part of the
+ * insertion of ex3 above, we need to update the length
+ * of the ex1 extent again here
+ */
+ if (ex1 && ex1 != ex) {
+ ex1 = ex;
+ ex1->ee_len = cpu_to_le16(iblock - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ex2 = &newex;
+ }
+ /* ex2: iblock to iblock + max_blocks - 1 : initialized */
+ ex2->ee_block = cpu_to_le32(iblock);
+ ex2->ee_start = cpu_to_le32(newblock);
+ ext4_ext_store_pblock(ex2, newblock);
+ ex2->ee_len = cpu_to_le16(allocated);
+ if (ex2 != ex)
+ goto insert;
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+ goto out;
+ /*
+ * The new (initialized) extent starts from the first block
+ * of the current extent, i.e. ex2 == ex.
+ * We have to see if it can be merged with the extent
+ * on the left.
+ */
+ if (ex2 > EXT_FIRST_EXTENT(eh)) {
+ /*
+ * To merge left, pass "ex2 - 1" to try_to_merge(),
+ * since it merges towards the right _only_.
+ */
+ ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
+ if (ret) {
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+ goto out;
+ depth = ext_depth(inode);
+ ex2--;
+ }
+ }
+ /*
+ * Try to merge towards the right. This is required
+ * only when the whole extent is being written to,
+ * i.e. ex2 == ex and ex3 == NULL.
+ */
+ if (!ex3) {
+ ret = ext4_ext_try_to_merge(inode, path, ex2);
+ if (ret) {
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+ goto out;
+ }
+ }
+ /* Mark modified extent as dirty */
+ err = ext4_ext_dirty(handle, inode, path + depth);
+ goto out;
+insert:
+ err = ext4_ext_insert_extent(handle, inode, path, &newex);
+out:
+ return err ? err : allocated;
+}
+
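As a reader's aid, the split performed above is plain interval arithmetic on block numbers. The stand-alone sketch below (illustration only, not kernel code; names and values are invented) reproduces the three cases listed in the comment at the top of ext4_ext_convert_to_initialized():

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative only: split an uninitialized extent [ee_block, ee_block + ee_len)
 * around a write of max_blocks blocks starting at iblock, which must lie
 * inside the extent.  Prints the resulting ex1 / ex2 / ex3 pieces.
 */
static void split_extent(unsigned ee_block, unsigned ee_len,
			 unsigned iblock, unsigned max_blocks)
{
	unsigned allocated = ee_len - (iblock - ee_block);
	unsigned init_len = allocated < max_blocks ? allocated : max_blocks;

	assert(iblock >= ee_block && iblock < ee_block + ee_len);

	if (iblock > ee_block)		/* ex1: head stays uninitialized */
		printf("ex1 uninit: [%u, %u)\n", ee_block, iblock);
	printf("ex2 init  : [%u, %u)\n", iblock, iblock + init_len);
	if (allocated > max_blocks)	/* ex3: tail stays uninitialized */
		printf("ex3 uninit: [%u, %u)\n", iblock + max_blocks,
		       ee_block + ee_len);
}

int main(void)
{
	split_extent(100, 10, 100, 10);	/* a> no split needed */
	split_extent(100, 10, 100, 4);	/* b> split into two extents */
	split_extent(100, 10, 103, 4);	/* c> split into three extents */
	return 0;
}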
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
ext4_fsblk_t iblock,
unsigned long max_blocks, struct buffer_head *bh_result,
int create, int extend_disksize)
{
struct ext4_ext_path *path = NULL;
+ struct ext4_extent_header *eh;
struct ext4_extent newex, *ex;
ext4_fsblk_t goal, newblock;
- int err = 0, depth;
+ int err = 0, depth, ret;
unsigned long allocated = 0;
__clear_bit(BH_New, &bh_result->b_state);
@@ -1998,8 +2249,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
if (goal) {
if (goal == EXT4_EXT_CACHE_GAP) {
if (!create) {
- /* block isn't allocated yet and
- * user doesn't want to allocate it */
+ /*
+ * block isn't allocated yet and
+ * user doesn't want to allocate it
+ */
goto out2;
}
/* we should allocate requested block */
@@ -2033,21 +2286,19 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
* this is why assert can't be put in ext4_ext_find_extent()
*/
BUG_ON(path[depth].p_ext == NULL && depth != 0);
+ eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (ex) {
unsigned long ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext_pblock(ex);
- unsigned short ee_len = le16_to_cpu(ex->ee_len);
+ unsigned short ee_len;
/*
- * Allow future support for preallocated extents to be added
- * as an RO_COMPAT feature:
* Uninitialized extents are treated as holes, except that
- * we avoid (fail) allocating new blocks during a write.
+ * we split out initialized portions during a write.
*/
- if (ee_len > EXT_MAX_LEN)
- goto out2;
+ ee_len = ext4_ext_get_actual_len(ex);
/* if found extent covers block, simply return it */
if (iblock >= ee_block && iblock < ee_block + ee_len) {
newblock = iblock - ee_block + ee_start;
@@ -2055,9 +2306,27 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
allocated = ee_len - (iblock - ee_block);
ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
ee_block, ee_len, newblock);
- ext4_ext_put_in_cache(inode, ee_block, ee_len,
- ee_start, EXT4_EXT_CACHE_EXTENT);
- goto out;
+
+ /* Do not put uninitialized extent in the cache */
+ if (!ext4_ext_is_uninitialized(ex)) {
+ ext4_ext_put_in_cache(inode, ee_block,
+ ee_len, ee_start,
+ EXT4_EXT_CACHE_EXTENT);
+ goto out;
+ }
+ if (create == EXT4_CREATE_UNINITIALIZED_EXT)
+ goto out;
+ if (!create)
+ goto out2;
+
+ ret = ext4_ext_convert_to_initialized(handle, inode,
+ path, iblock,
+ max_blocks);
+ if (ret <= 0)
+ goto out2;
+ else
+ allocated = ret;
+ goto outnew;
}
}
@@ -2066,8 +2335,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
* we couldn't try to create block if create flag is zero
*/
if (!create) {
- /* put just found gap into cache to speed up
- * subsequent requests */
+ /*
+ * put just found gap into cache to speed up
+ * subsequent requests
+ */
ext4_ext_put_gap_in_cache(inode, path, iblock);
goto out2;
}
@@ -2081,6 +2352,19 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* allocate new block */
goal = ext4_ext_find_goal(inode, path, iblock);
+ /*
+ * See if request is beyond maximum number of blocks we can have in
+ * a single extent. For an initialized extent this limit is
+ * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+ * EXT_UNINIT_MAX_LEN.
+ */
+ if (max_blocks > EXT_INIT_MAX_LEN &&
+ create != EXT4_CREATE_UNINITIALIZED_EXT)
+ max_blocks = EXT_INIT_MAX_LEN;
+ else if (max_blocks > EXT_UNINIT_MAX_LEN &&
+ create == EXT4_CREATE_UNINITIALIZED_EXT)
+ max_blocks = EXT_UNINIT_MAX_LEN;
+
/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
newex.ee_block = cpu_to_le32(iblock);
newex.ee_len = cpu_to_le16(max_blocks);
@@ -2098,6 +2382,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock);
newex.ee_len = cpu_to_le16(allocated);
+ if (create == EXT4_CREATE_UNINITIALIZED_EXT) /* Mark uninitialized */
+ ext4_ext_mark_uninitialized(&newex);
err = ext4_ext_insert_extent(handle, inode, path, &newex);
if (err) {
/* free data blocks we just allocated */
@@ -2111,10 +2397,13 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* previous routine could use block we allocated */
newblock = ext_pblock(&newex);
+outnew:
__set_bit(BH_New, &bh_result->b_state);
- ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
- EXT4_EXT_CACHE_EXTENT);
+ /* Cache only when it is _not_ an uninitialized extent */
+ if (create != EXT4_CREATE_UNINITIALIZED_EXT)
+ ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
+ EXT4_EXT_CACHE_EXTENT);
out:
if (allocated > max_blocks)
allocated = max_blocks;
@@ -2178,7 +2467,8 @@ void ext4_ext_truncate(struct inode * inode, struct page *page)
err = ext4_ext_remove_space(inode, last_block);
/* In a multi-transaction truncate, we only make the final
- * transaction synchronous. */
+ * transaction synchronous.
+ */
if (IS_SYNC(inode))
handle->h_sync = 1;
@@ -2217,3 +2507,127 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
return needed;
}
+
+/*
+ * Preallocate space for a file. This implements ext4's fallocate inode
+ * operation, which gets called from the sys_fallocate system call.
+ * For block-mapped files, posix_fallocate should fall back to the method
+ * of writing zeroes to the required new blocks (the same behavior that is
+ * expected from filesystems which do not support the fallocate() system call).
+ */
+long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+{
+ handle_t *handle;
+ ext4_fsblk_t block, max_blocks;
+ ext4_fsblk_t nblocks = 0;
+ int ret = 0;
+ int ret2 = 0;
+ int retries = 0;
+ struct buffer_head map_bh;
+ unsigned int credits, blkbits = inode->i_blkbits;
+
+ /*
+ * currently supporting (pre)allocate mode for extent-based
+ * files _only_
+ */
+ if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ return -EOPNOTSUPP;
+
+ /* preallocation to directories is currently not supported */
+ if (S_ISDIR(inode->i_mode))
+ return -ENODEV;
+
+ block = offset >> blkbits;
+ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
+ - block;
+
+ /*
+ * credits to insert 1 extent into extent tree + buffers to be able to
+ * modify 1 super block, 1 block bitmap and 1 group descriptor.
+ */
+ credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
+retry:
+ while (ret >= 0 && ret < max_blocks) {
+ block = block + ret;
+ max_blocks = max_blocks - ret;
+ handle = ext4_journal_start(inode, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ break;
+ }
+
+ ret = ext4_ext_get_blocks(handle, inode, block,
+ max_blocks, &map_bh,
+ EXT4_CREATE_UNINITIALIZED_EXT, 0);
+ WARN_ON(!ret);
+ if (!ret) {
+ ext4_error(inode->i_sb, "ext4_fallocate",
+ "ext4_ext_get_blocks returned 0! inode#%lu"
+ ", block=%llu, max_blocks=%llu",
+ inode->i_ino, block, max_blocks);
+ ret = -EIO;
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ break;
+ }
+ if (ret > 0) {
+ /* check wrap through sign-bit/zero here */
+ if ((block + ret) < 0 || (block + ret) < block) {
+ ret = -EIO;
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ break;
+ }
+ if (buffer_new(&map_bh) && ((block + ret) >
+ (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
+ >> blkbits)))
+ nblocks = nblocks + ret;
+ }
+
+ /* Update ctime if new blocks get allocated */
+ if (nblocks) {
+ struct timespec now;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_ctime, &now))
+ inode->i_ctime = now;
+ }
+
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ if (ret2)
+ break;
+ }
+
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ /*
+ * Time to update the file size.
+ * Update only when preallocation was requested beyond the file size.
+ */
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (offset + len) > i_size_read(inode)) {
+ if (ret > 0) {
+ /*
+ * if no error, we assume preallocation succeeded
+ * completely
+ */
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, offset + len);
+ EXT4_I(inode)->i_disksize = i_size_read(inode);
+ mutex_unlock(&inode->i_mutex);
+ } else if (ret < 0 && nblocks) {
+ /* Handle partial allocation scenario */
+ loff_t newsize;
+
+ mutex_lock(&inode->i_mutex);
+ newsize = (nblocks << blkbits) + i_size_read(inode);
+ i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
+ EXT4_I(inode)->i_disksize = i_size_read(inode);
+ mutex_unlock(&inode->i_mutex);
+ }
+ }
+
+ return ret > 0 ? ret2 : ret;
+}
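For reference, userspace reaches this code through the new sys_fallocate entry point; posix_fallocate(3) is one portable way to request preallocation today. A minimal usage sketch, assuming nothing ext4-specific beyond the fallback behaviour the comment above describes:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t len = 16 * 1024 * 1024;	/* preallocate 16 MiB */
	int fd, err;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * On filesystems with a real fallocate implementation this may be a
	 * single metadata operation; otherwise the library falls back to
	 * writing zeroes, as noted in the comment above ext4_fallocate().
	 */
	err = posix_fallocate(fd, 0, len);
	if (err)
		fprintf(stderr, "posix_fallocate: %s\n", strerror(err));
	close(fd);
	return err ? 1 : 0;
}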
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d4c8186..1a81cd6 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -134,5 +134,6 @@ const struct inode_operations ext4_file_inode_operations = {
.removexattr = generic_removexattr,
#endif
.permission = ext4_permission,
+ .fallocate = ext4_fallocate,
};
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c88b439..427f830 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -563,7 +563,8 @@ got:
inode->i_ino = ino;
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
+ ext4_current_time(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
@@ -595,9 +596,8 @@ got:
spin_unlock(&sbi->s_next_gen_lock);
ei->i_state = EXT4_STATE_NEW;
- ei->i_extra_isize =
- (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) ?
- sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE : 0;
+
+ ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
if(DQUOT_ALLOC_INODE(inode)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8416fa2..a4848e0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -726,7 +726,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
/* We are done with atomic stuff, now do the rest of housekeeping */
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
/* had we spliced it onto indirect block? */
@@ -1766,7 +1766,6 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
struct inode *inode = mapping->host;
struct buffer_head *bh;
int err = 0;
- void *kaddr;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
@@ -1778,10 +1777,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
*/
if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
ext4_should_writeback_data(inode) && PageUptodate(page)) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, length, KM_USER0);
set_page_dirty(page);
goto unlock;
}
@@ -1834,10 +1830,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
goto unlock;
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, length, KM_USER0);
BUFFER_TRACE(bh, "zeroed end of block");
@@ -2375,7 +2368,7 @@ do_indirects:
ext4_discard_reservation(inode);
mutex_unlock(&ei->truncate_mutex);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
/*
@@ -2583,6 +2576,25 @@ void ext4_set_inode_flags(struct inode *inode)
inode->i_flags |= S_DIRSYNC;
}
+/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+void ext4_get_inode_flags(struct ext4_inode_info *ei)
+{
+ unsigned int flags = ei->vfs_inode.i_flags;
+
+ ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+ EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
+ if (flags & S_SYNC)
+ ei->i_flags |= EXT4_SYNC_FL;
+ if (flags & S_APPEND)
+ ei->i_flags |= EXT4_APPEND_FL;
+ if (flags & S_IMMUTABLE)
+ ei->i_flags |= EXT4_IMMUTABLE_FL;
+ if (flags & S_NOATIME)
+ ei->i_flags |= EXT4_NOATIME_FL;
+ if (flags & S_DIRSYNC)
+ ei->i_flags |= EXT4_DIRSYNC_FL;
+}
+
void ext4_read_inode(struct inode * inode)
{
struct ext4_iloc iloc;
@@ -2610,10 +2622,6 @@ void ext4_read_inode(struct inode * inode)
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
- inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
ei->i_state = 0;
ei->i_dir_start_lookup = 0;
@@ -2691,6 +2699,11 @@ void ext4_read_inode(struct inode * inode)
} else
ei->i_extra_isize = 0;
+ EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
@@ -2744,6 +2757,7 @@ static int ext4_do_update_inode(handle_t *handle,
if (ei->i_state & EXT4_STATE_NEW)
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+ ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if(!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
@@ -2771,9 +2785,12 @@ static int ext4_do_update_inode(handle_t *handle,
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(ei->i_disksize);
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+
+ EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
@@ -2886,7 +2903,7 @@ int ext4_write_inode(struct inode *inode, int wait)
return 0;
if (ext4_journal_current_handle()) {
- jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
+ jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
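The EXT4_INODE_GET_XTIME()/EXT4_INODE_SET_XTIME() macros used in the inode.c hunks above pair the classic 32-bit seconds field with a 32-bit "extra" word that carries nanoseconds plus a few epoch-extension bits. Their exact on-disk layout is defined by the macros themselves, not by this hunk; the sketch below only illustrates the general packing idea, with EPOCH_BITS chosen as an assumption:

#include <assert.h>
#include <stdint.h>

#define EPOCH_BITS	2			/* assumed width of the epoch extension */
#define EPOCH_MASK	((1u << EPOCH_BITS) - 1)

/* Pack the high bits of an extended second count plus nanoseconds into one
 * 32-bit "extra" word; the low 32 bits of the seconds stay in the classic
 * on-disk field.  Illustrative layout only. */
static uint32_t pack_extra(int64_t sec, uint32_t nsec)
{
	return ((uint32_t)(sec >> 32) & EPOCH_MASK) | (nsec << EPOCH_BITS);
}

static void unpack(uint32_t lo_sec, uint32_t extra, int64_t *sec, uint32_t *nsec)
{
	*sec  = (int64_t)lo_sec | ((int64_t)(extra & EPOCH_MASK) << 32);
	*nsec = extra >> EPOCH_BITS;
}

int main(void)
{
	int64_t sec = (1LL << 32) + 12345;	/* a post-2038 timestamp */
	uint32_t nsec = 999999999u;
	int64_t s;
	uint32_t n;

	unpack((uint32_t)sec, pack_extra(sec, nsec), &s, &n);
	assert(s == sec && n == nsec);
	return 0;
}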
@@ -3082,6 +3099,39 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
}
/*
+ * Expand an inode by new_extra_isize bytes.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize,
+ struct ext4_iloc iloc, handle_t *handle)
+{
+ struct ext4_inode *raw_inode;
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *entry;
+
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+ return 0;
+
+ raw_inode = ext4_raw_inode(&iloc);
+
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+
+ /* No extended attributes present */
+ if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+ new_extra_isize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ return 0;
+ }
+
+ /* try to expand with EAs present */
+ return ext4_expand_extra_isize_ea(inode, new_extra_isize,
+ raw_inode, handle);
+}
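ext4_expand_extra_isize() works on the in-inode layout implied by the IHDR()/IFIRST() macros this patch moves into xattr.h: a 128-byte fixed inode (EXT4_GOOD_OLD_INODE_SIZE), then i_extra_isize bytes of extra fields, then the in-inode xattr header and entries. A stand-alone sketch of that offset arithmetic, with sizes hard-coded purely for illustration:

#include <stdio.h>

#define GOOD_OLD_INODE_SIZE	128	/* fixed part of the on-disk inode */

/* Show where the in-inode xattr header starts for a given i_extra_isize and
 * how much space remains for xattrs in an inode_size-byte inode.  Mirrors
 * the IHDR() idea: the header sits right after the extra fields. */
static void show_layout(unsigned inode_size, unsigned extra_isize)
{
	unsigned ihdr_off = GOOD_OLD_INODE_SIZE + extra_isize;

	printf("inode_size=%u extra_isize=%u -> xattr header at %u, "
	       "%u bytes left for in-inode xattrs\n",
	       inode_size, extra_isize, ihdr_off, inode_size - ihdr_off);
}

int main(void)
{
	show_layout(256, 0);	/* xattrs start right after the fixed inode */
	show_layout(256, 32);	/* a larger extra_isize shrinks the xattr area */
	return 0;
}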
+
+/*
* What we do here is to mark the in-core inode as clean with respect to inode
* dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
@@ -3105,10 +3155,38 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
- int err;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ static unsigned int mnt_count;
+ int err, ret;
might_sleep();
err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+ /*
+ * We need extra buffer credits since we may write into EA block
+ * with this same handle. If journal_extend fails, then it will
+ * only result in a minor loss of functionality for that inode.
+ * If this is felt to be critical, then e2fsck should be run to
+ * force a large enough s_min_extra_isize.
+ */
+ if ((jbd2_journal_extend(handle,
+ EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
+ ret = ext4_expand_extra_isize(inode,
+ sbi->s_want_extra_isize,
+ iloc, handle);
+ if (ret) {
+ EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ if (mnt_count != sbi->s_es->s_mnt_count) {
+ ext4_warning(inode->i_sb, __FUNCTION__,
+ "Unable to expand inode %lu. Delete"
+ " some EAs or run e2fsck.",
+ inode->i_ino);
+ mnt_count = sbi->s_es->s_mnt_count;
+ }
+ }
+ }
+ }
if (!err)
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
return err;
@@ -3197,7 +3275,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
*/
journal = EXT4_JOURNAL(inode);
- if (is_journal_aborted(journal) || IS_RDONLY(inode))
+ if (is_journal_aborted(journal))
return -EROFS;
jbd2_journal_lock_updates(journal);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 7b4aa45..c04c7cc 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -28,6 +28,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
switch (cmd) {
case EXT4_IOC_GETFLAGS:
+ ext4_get_inode_flags(ei);
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case EXT4_IOC_SETFLAGS: {
@@ -96,7 +97,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
ei->i_flags = flags;
ext4_set_inode_flags(inode);
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
@@ -133,14 +134,14 @@ flags_err:
return PTR_ERR(handle);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
inode->i_generation = generation;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
}
ext4_journal_stop(handle);
return err;
}
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
case EXT4_IOC_WAIT_FOR_READONLY:
/*
* This is racy - by the time we're woken up and running,
@@ -282,7 +283,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC32_SETVERSION_OLD:
cmd = EXT4_IOC_SETVERSION_OLD;
break;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
case EXT4_IOC32_WAIT_FOR_READONLY:
cmd = EXT4_IOC_WAIT_FOR_READONLY;
break;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2de339d..5fdb862 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -140,7 +140,8 @@ struct dx_frame
struct dx_map_entry
{
u32 hash;
- u32 offs;
+ u16 offs;
+ u16 size;
};
#ifdef CONFIG_EXT4_INDEX
@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
entries = (struct dx_entry *) (((char *)&root->info) +
root->info.info_length);
- assert(dx_get_limit(entries) == dx_root_limit(dir,
- root->info.info_length));
+
+ if (dx_get_limit(entries) != dx_root_limit(dir,
+ root->info.info_length)) {
+ ext4_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: limit != root limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail;
+ }
+
dxtrace (printk("Look up %x", hash));
while (1)
{
count = dx_get_count(entries);
- assert (count && count <= dx_get_limit(entries));
+ if (!count || count > dx_get_limit(entries)) {
+ ext4_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: no count or count > limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail2;
+ }
+
p = entries + 1;
q = entries + count - 1;
while (p <= q)
@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
- assert (dx_get_limit(entries) == dx_node_limit (dir));
+ if (dx_get_limit(entries) != dx_node_limit (dir)) {
+ ext4_warning(dir->i_sb, __FUNCTION__,
+ "dx entry: limit != node limit");
+ brelse(bh);
+ *err = ERR_BAD_DX_DIR;
+ goto fail2;
+ }
frame++;
+ frame->bh = NULL;
}
fail2:
while (frame >= frame_in) {
@@ -432,6 +455,10 @@ fail2:
frame--;
}
fail:
+ if (*err == ERR_BAD_DX_DIR)
+ ext4_warning(dir->i_sb, __FUNCTION__,
+ "Corrupt dir inode %ld, running e2fsck is "
+ "recommended.", dir->i_ino);
return NULL;
}
@@ -671,6 +698,10 @@ errout:
* Directory block splitting, compacting
*/
+/*
+ * Create map of hash values, offsets, and sizes, stored at end of block.
+ * Returns number of entries mapped.
+ */
static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
{
@@ -684,7 +715,8 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
ext4fs_dirhash(de->name, de->name_len, &h);
map_tail--;
map_tail->hash = h.hash;
- map_tail->offs = (u32) ((char *) de - base);
+ map_tail->offs = (u16) ((char *) de - base);
+ map_tail->size = le16_to_cpu(de->rec_len);
count++;
cond_resched();
}
@@ -694,6 +726,7 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
return count;
}
+/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
struct dx_map_entry *p, *q, *top = map + count - 1;
@@ -1089,6 +1122,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
}
#ifdef CONFIG_EXT4_INDEX
+/*
+ * Move count entries from end of map between two memory locations.
+ * Returns pointer to last entry moved.
+ */
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
{
@@ -1107,6 +1144,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
return (struct ext4_dir_entry_2 *) (to - rec_len);
}
+/*
+ * Compact each dir entry in the range to the minimal rec_len.
+ * Returns pointer to last entry in range.
+ */
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
{
struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
@@ -1129,6 +1170,11 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
return prev;
}
+/*
+ * Split a full leaf block to make room for a new dir entry.
+ * Allocate a new block, and move entries so that they are approx. equally full.
+ * Returns pointer to de in block into which the new entry will be inserted.
+ */
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct buffer_head **bh,struct dx_frame *frame,
struct dx_hash_info *hinfo, int *error)
@@ -1140,7 +1186,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
u32 hash2;
struct dx_map_entry *map;
char *data1 = (*bh)->b_data, *data2;
- unsigned split;
+ unsigned split, move, size;
+ int i;
struct ext4_dir_entry_2 *de = NULL, *de2;
int err = 0;
@@ -1168,8 +1214,19 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
count = dx_make_map ((struct ext4_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
- split = count/2; // need to adjust to actual middle
dx_sort_map (map, count);
+ /* Split the existing block in the middle, size-wise */
+ size = 0;
+ move = 0;
+ for (i = count-1; i >= 0; i--) {
+ /* is more than half of this entry in 2nd half of the block? */
+ if (size + map[i].size/2 > blocksize/2)
+ break;
+ size += map[i].size;
+ move++;
+ }
+ /* map index at which we will split */
+ split = count - move;
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk("Split block %i at %x, %i/%i\n",
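The loop added above walks the hash-sorted map from the end, accumulating entries for the new block until roughly half of the directory block's bytes have been claimed. A stand-alone model of that calculation, using invented record lengths and a toy block size:

#include <stdio.h>

int main(void)
{
	/* rec_len of each mapped entry, already sorted by hash */
	unsigned map_size[] = { 40, 28, 16, 44, 32, 20, 36, 24 };
	int count = sizeof(map_size) / sizeof(map_size[0]);
	unsigned blocksize = 256;	/* toy block size for illustration */
	unsigned size = 0, move = 0;
	int i, split;

	for (i = count - 1; i >= 0; i--) {
		/* is more than half of this entry in the 2nd half of the block? */
		if (size + map_size[i] / 2 > blocksize / 2)
			break;
		size += map_size[i];
		move++;
	}
	split = count - move;	/* map index at which the block is split */
	printf("move %u entries (%u bytes) to the new block, split at index %d\n",
	       move, size, split);
	return 0;
}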
@@ -1295,7 +1352,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+ dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
ext4_mark_inode_dirty(handle, dir);
@@ -1629,6 +1686,35 @@ static int ext4_delete_entry (handle_t *handle,
return -ENOENT;
}
+/*
+ * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+ * since this indicates that nlinks count was previously 1.
+ */
+static void ext4_inc_count(handle_t *handle, struct inode *inode)
+{
+ inc_nlink(inode);
+ if (is_dx(inode) && inode->i_nlink > 1) {
+ /* limit is 16-bit i_links_count */
+ if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
+ inode->i_nlink = 1;
+ EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
+ }
+ }
+}
+
+/*
+ * If a directory had nlink == 1, we leave it at 1: this indicates that the
+ * directory has more than EXT4_LINK_MAX subdirectories.
+ */
+static void ext4_dec_count(handle_t *handle, struct inode *inode)
+{
+ drop_nlink(inode);
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
+ inc_nlink(inode);
+}
+
+
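A practical consequence of the DIR_NLINK scheme above: once a directory's link count has been pinned to 1, userspace can no longer derive the number of subdirectories from st_nlink - 2. A small illustrative check a tool might perform (not part of the patch):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0 || !S_ISDIR(st.st_mode)) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	if (st.st_nlink == 1)
		/* overflowed link count: subdirs must be counted via readdir */
		printf("%s: link count not meaningful, count subdirs by scanning\n",
		       argv[1]);
	else
		printf("%s: roughly %lu subdirectories\n", argv[1],
		       (unsigned long)st.st_nlink - 2);
	return 0;
}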
static int ext4_add_nondir(handle_t *handle,
struct dentry *dentry, struct inode *inode)
{
@@ -1725,7 +1811,7 @@ static int ext4_mkdir(struct inode * dir, struct dentry * dentry, int mode)
struct ext4_dir_entry_2 * de;
int err, retries = 0;
- if (dir->i_nlink >= EXT4_LINK_MAX)
+ if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
retry:
@@ -1748,7 +1834,7 @@ retry:
inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
dir_block = ext4_bread (handle, inode, 0, 1, &err);
if (!dir_block) {
- drop_nlink(inode); /* is this nlink == 0? */
+ ext4_dec_count(handle, inode); /* is this nlink == 0? */
ext4_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
@@ -1780,7 +1866,7 @@ retry:
iput (inode);
goto out_stop;
}
- inc_nlink(dir);
+ ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
d_instantiate(dentry, inode);
@@ -2045,9 +2131,9 @@ static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
- if (inode->i_nlink != 2)
+ if (!EXT4_DIR_LINK_EMPTY(inode))
ext4_warning (inode->i_sb, "ext4_rmdir",
- "empty directory has nlink!=2 (%d)",
+ "empty directory has too many links (%d)",
inode->i_nlink);
inode->i_version++;
clear_nlink(inode);
@@ -2056,9 +2142,9 @@ static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
- drop_nlink(dir);
+ ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
@@ -2106,13 +2192,13 @@ static int ext4_unlink(struct inode * dir, struct dentry *dentry)
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+ dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
- drop_nlink(inode);
+ ext4_dec_count(handle, inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime;
+ inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
retval = 0;
@@ -2159,7 +2245,7 @@ retry:
err = __page_symlink(inode, symname, l,
mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
if (err) {
- drop_nlink(inode);
+ ext4_dec_count(handle, inode);
ext4_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
@@ -2185,8 +2271,9 @@ static int ext4_link (struct dentry * old_dentry,
struct inode *inode = old_dentry->d_inode;
int err, retries = 0;
- if (inode->i_nlink >= EXT4_LINK_MAX)
+ if (EXT4_DIR_LINK_MAX(inode))
return -EMLINK;
+
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
@@ -2203,8 +2290,8 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode->i_ctime = CURRENT_TIME_SEC;
- inc_nlink(inode);
+ inode->i_ctime = ext4_current_time(inode);
+ ext4_inc_count(handle, inode);
atomic_inc(&inode->i_count);
err = ext4_add_nondir(handle, dentry, inode);
@@ -2305,7 +2392,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = CURRENT_TIME_SEC;
+ old_inode->i_ctime = ext4_current_time(old_inode);
ext4_mark_inode_dirty(handle, old_inode);
/*
@@ -2337,10 +2424,10 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
}
if (new_inode) {
- drop_nlink(new_inode);
- new_inode->i_ctime = CURRENT_TIME_SEC;
+ ext4_dec_count(handle, new_inode);
+ new_inode->i_ctime = ext4_current_time(new_inode);
}
- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+ old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
BUFFER_TRACE(dir_bh, "get_write_access");
@@ -2348,11 +2435,13 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
ext4_journal_dirty_metadata(handle, dir_bh);
- drop_nlink(old_dir);
+ ext4_dec_count(handle, old_dir);
if (new_inode) {
- drop_nlink(new_inode);
+ /* checked empty_dir above, can't have another parent;
+ * ext4_dec_count() won't work for many-linked dirs */
+ new_inode->i_nlink = 0;
} else {
- inc_nlink(new_dir);
+ ext4_inc_count(handle, new_dir);
ext4_update_dx_flag(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b806e68..3c1397f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -36,6 +36,7 @@
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
@@ -540,7 +541,7 @@ static int init_inodecache(void)
sizeof(struct ext4_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ext4_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -734,7 +735,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_grpquota, Opt_extents,
+ Opt_grpquota, Opt_extents, Opt_noextents,
};
static match_table_t tokens = {
@@ -785,6 +786,7 @@ static match_table_t tokens = {
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_extents, "extents"},
+ {Opt_noextents, "noextents"},
{Opt_err, NULL},
{Opt_resize, "resize"},
};
@@ -1120,6 +1122,9 @@ clear_qf_name:
case Opt_extents:
set_opt (sbi->s_mount_opt, EXTENTS);
break;
+ case Opt_noextents:
+ clear_opt (sbi->s_mount_opt, EXTENTS);
+ break;
default:
printk (KERN_ERR
"EXT4-fs: Unrecognized mount option \"%s\" "
@@ -1278,7 +1283,7 @@ static int ext4_check_descriptors (struct super_block * sb)
}
inode_table = ext4_inode_table(sb, gdp);
if (inode_table < first_block ||
- inode_table + sbi->s_itb_per_group > last_block)
+ inode_table + sbi->s_itb_per_group - 1 > last_block)
{
ext4_error (sb, "ext4_check_descriptors",
"Inode table for group %d"
@@ -1551,6 +1556,12 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, RESERVATION);
+ /*
+ * Turn on the extents feature by default in the ext4 filesystem.
+ * Use -o noextents to turn it off.
+ */
+ set_opt(sbi->s_mount_opt, EXTENTS);
+
if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
NULL, 0))
goto failed_mount;
@@ -1634,13 +1645,15 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
- (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+ (!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
printk (KERN_ERR
"EXT4-fs: unsupported inode size: %d\n",
sbi->s_inode_size);
goto failed_mount;
}
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
+ sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_frag_size = EXT4_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
@@ -1803,6 +1816,13 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount3;
}
+ if (ext4_blocks_count(es) > 0xffffffffULL &&
+ !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
+ JBD2_FEATURE_INCOMPAT_64BIT)) {
+ printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
+ goto failed_mount4;
+ }
+
/* We have now updated the journal if required, so we can
* validate the data journaling mode. */
switch (test_opt(sb, DATA_FLAGS)) {
@@ -1857,6 +1877,32 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
}
ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+
+ /* determine the minimum size of new large inodes, if present */
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_want_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_want_extra_isize);
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_min_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_min_extra_isize);
+ }
+ }
+ /* Check if enough inode space is available */
+ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+ sbi->s_inode_size) {
+ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+ printk(KERN_INFO "EXT4-fs: required extra inode space not"
+ "available.\n");
+ }
+
/*
* akpm: core read_super() calls in here with the superblock locked.
* That deadlocks, because orphan cleanup needs to lock the superblock
@@ -2652,8 +2698,11 @@ static int ext4_release_dquot(struct dquot *dquot)
handle = ext4_journal_start(dquot_to_inode(dquot),
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ /* Release dquot anyway to avoid endless cycle in dqput() */
+ dquot_release(dquot);
return PTR_ERR(handle);
+ }
ret = dquot_release(dquot);
err = ext4_journal_stop(handle);
if (!ret)
@@ -2786,6 +2835,12 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
+ if (!handle) {
+ printk(KERN_WARNING "EXT4-fs: Quota write (off=%Lu, len=%Lu)"
+ " cancelled because transaction is not started.\n",
+ (unsigned long long)off, (unsigned long long)len);
+ return -EIO;
+ }
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e832e96..b10d68f 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -66,13 +66,6 @@
#define BFIRST(bh) ENTRY(BHDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-#define IHDR(inode, raw_inode) \
- ((struct ext4_xattr_ibody_header *) \
- ((void *)raw_inode + \
- EXT4_GOOD_OLD_INODE_SIZE + \
- EXT4_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-
#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
printk(KERN_DEBUG "inode %s:%lu: ", \
@@ -508,6 +501,24 @@ out:
return;
}
+/*
+ * Find the available free space for EAs. This also returns the total number of
+ * bytes used by EA entries.
+ */
+static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
+ size_t *min_offs, void *base, int *total)
+{
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ *total += EXT4_XATTR_LEN(last->e_name_len);
+ if (!last->e_value_block && last->e_value_size) {
+ size_t offs = le16_to_cpu(last->e_value_offs);
+ if (offs < *min_offs)
+ *min_offs = offs;
+ }
+ }
+ return (*min_offs - ((void *)last - base) - sizeof(__u32));
+}
+
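The helper above reports free space as the gap between the end of the forward-growing entry table and the lowest value offset, minus the 4-byte terminator. A toy recalculation with invented entry and value sizes:

#include <stdio.h>

int main(void)
{
	/* toy in-inode xattr area of 128 bytes: entry descriptors grow forward
	 * from offset 0, values are packed backwards from the end of the area */
	unsigned area_size = 128;
	unsigned entry_bytes[] = { 16, 24 };	/* aligned entry descriptors */
	unsigned value_offs[]  = { 96, 72 };	/* where each value starts   */
	unsigned total = 0, min_offs = area_size, free_bytes;
	unsigned i;

	for (i = 0; i < 2; i++) {
		total += entry_bytes[i];
		if (value_offs[i] < min_offs)
			min_offs = value_offs[i];
	}
	free_bytes = min_offs - total - sizeof(unsigned int);	/* terminator */
	printf("entries use %u bytes, lowest value offset %u -> %u bytes free\n",
	       total, min_offs, free_bytes);
	return 0;
}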
struct ext4_xattr_info {
int name_index;
const char *name;
@@ -1013,7 +1024,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
}
if (!error) {
ext4_xattr_update_super_block(handle, inode->i_sb);
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
+ if (!value)
+ EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1067,6 +1080,253 @@ retry:
}
/*
+ * Shift the EA entries in the inode to create space for the increased
+ * i_extra_isize.
+ */
+static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
+ int value_offs_shift, void *to,
+ void *from, size_t n, int blocksize)
+{
+ struct ext4_xattr_entry *last = entry;
+ int new_offs;
+
+ /* Adjust the value offsets of the entries */
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ if (!last->e_value_block && last->e_value_size) {
+ new_offs = le16_to_cpu(last->e_value_offs) +
+ value_offs_shift;
+ BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
+ > blocksize);
+ last->e_value_offs = cpu_to_le16(new_offs);
+ }
+ }
+ /* Shift the entries by n bytes */
+ memmove(to, from, n);
+}
+
+/*
+ * Expand an inode by new_extra_isize bytes when EAs are present.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle)
+{
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *entry, *last, *first;
+ struct buffer_head *bh = NULL;
+ struct ext4_xattr_ibody_find *is = NULL;
+ struct ext4_xattr_block_find *bs = NULL;
+ char *buffer = NULL, *b_entry_name = NULL;
+ size_t min_offs, free;
+ int total_ino, total_blk;
+ void *base, *start, *end;
+ int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+ int s_min_extra_isize =
+ le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+retry:
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return 0;
+ }
+
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+
+ /*
+ * Check if enough free space is available in the inode to shift the
+ * entries ahead by new_extra_isize.
+ */
+
+ base = start = entry;
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ min_offs = end - base;
+ last = entry;
+ total_ino = sizeof(struct ext4_xattr_ibody_header);
+
+ free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
+ if (free >= new_extra_isize) {
+ entry = IFIRST(header);
+ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
+ - new_extra_isize, (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
+ (void *)header, total_ino,
+ inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ error = 0;
+ goto cleanup;
+ }
+
+ /*
+ * Enough free space isn't available in the inode, check if
+ * EA block can hold new_extra_isize bytes.
+ */
+ if (EXT4_I(inode)->i_file_acl) {
+ bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+ error = -EIO;
+ if (!bh)
+ goto cleanup;
+ if (ext4_xattr_check_block(bh)) {
+ ext4_error(inode->i_sb, __FUNCTION__,
+ "inode %lu: bad block %llu", inode->i_ino,
+ EXT4_I(inode)->i_file_acl);
+ error = -EIO;
+ goto cleanup;
+ }
+ base = BHDR(bh);
+ first = BFIRST(bh);
+ end = bh->b_data + bh->b_size;
+ min_offs = end - base;
+ free = ext4_xattr_free_space(first, &min_offs, base,
+ &total_blk);
+ if (free < new_extra_isize) {
+ if (!tried_min_extra_isize && s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
+ brelse(bh);
+ goto retry;
+ }
+ error = -1;
+ goto cleanup;
+ }
+ } else {
+ free = inode->i_sb->s_blocksize;
+ }
+
+ while (new_extra_isize > 0) {
+ size_t offs, size, entry_size;
+ struct ext4_xattr_entry *small_entry = NULL;
+ struct ext4_xattr_info i = {
+ .value = NULL,
+ .value_len = 0,
+ };
+ unsigned int total_size; /* EA entry size + value size */
+ unsigned int shift_bytes; /* number of bytes to shift EAs by */
+ unsigned int min_total_size = ~0U;
+
+ is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+ if (!is || !bs) {
+ error = -ENOMEM;
+ goto cleanup;
+ }
+
+ is->s.not_found = -ENODATA;
+ bs->s.not_found = -ENODATA;
+ is->iloc.bh = NULL;
+ bs->bh = NULL;
+
+ last = IFIRST(header);
+ /* Find the entry best suited to be pushed into EA block */
+ entry = NULL;
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ total_size =
+ EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
+ EXT4_XATTR_LEN(last->e_name_len);
+ if (total_size <= free && total_size < min_total_size) {
+ if (total_size < new_extra_isize) {
+ small_entry = last;
+ } else {
+ entry = last;
+ min_total_size = total_size;
+ }
+ }
+ }
+
+ if (entry == NULL) {
+ if (small_entry) {
+ entry = small_entry;
+ } else {
+ if (!tried_min_extra_isize &&
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
+ goto retry;
+ }
+ error = -1;
+ goto cleanup;
+ }
+ }
+ offs = le16_to_cpu(entry->e_value_offs);
+ size = le32_to_cpu(entry->e_value_size);
+ entry_size = EXT4_XATTR_LEN(entry->e_name_len);
+ i.name_index = entry->e_name_index;
+ buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
+ b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+ if (!buffer || !b_entry_name) {
+ error = -ENOMEM;
+ goto cleanup;
+ }
+ /* Save the entry name and the entry value */
+ memcpy(buffer, (void *)IFIRST(header) + offs,
+ EXT4_XATTR_SIZE(size));
+ memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+ b_entry_name[entry->e_name_len] = '\0';
+ i.name = b_entry_name;
+
+ error = ext4_get_inode_loc(inode, &is->iloc);
+ if (error)
+ goto cleanup;
+
+ error = ext4_xattr_ibody_find(inode, &i, is);
+ if (error)
+ goto cleanup;
+
+ /* Remove the chosen entry from the inode */
+ error = ext4_xattr_ibody_set(handle, inode, &i, is);
+
+ entry = IFIRST(header);
+ if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
+ shift_bytes = new_extra_isize;
+ else
+ shift_bytes = entry_size + size;
+ /* Adjust the offsets and shift the remaining entries ahead */
+ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
+ shift_bytes, (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
+ (void *)header, total_ino - entry_size,
+ inode->i_sb->s_blocksize);
+
+ extra_isize += shift_bytes;
+ new_extra_isize -= shift_bytes;
+ EXT4_I(inode)->i_extra_isize = extra_isize;
+
+ i.name = b_entry_name;
+ i.value = buffer;
+ i.value_len = cpu_to_le32(size);
+ error = ext4_xattr_block_find(inode, &i, bs);
+ if (error)
+ goto cleanup;
+
+ /* Add entry which was removed from the inode into the block */
+ error = ext4_xattr_block_set(handle, inode, &i, bs);
+ if (error)
+ goto cleanup;
+ kfree(b_entry_name);
+ kfree(buffer);
+ brelse(is->iloc.bh);
+ kfree(is);
+ kfree(bs);
+ }
+ brelse(bh);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return 0;
+
+cleanup:
+ kfree(b_entry_name);
+ kfree(buffer);
+ if (is)
+ brelse(is->iloc.bh);
+ kfree(is);
+ kfree(bs);
+ brelse(bh);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return error;
+}
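The selection loop above prefers the smallest in-inode attribute whose removal frees at least new_extra_isize bytes in a single move, and otherwise falls back to an undersized candidate ("small_entry"). A stand-alone model of just that choice, with made-up sizes:

#include <stdio.h>

/* Pick which xattr to push out of the inode: the smallest entry whose total
 * size covers 'needed' bytes, else the last undersized entry that still fits
 * in the external block ('free_blk' bytes).  Returns the chosen index or -1. */
static int pick_entry(const unsigned *total_size, int count,
		      unsigned needed, unsigned free_blk)
{
	unsigned min_total = ~0u;
	int chosen = -1, small = -1;
	int i;

	for (i = 0; i < count; i++) {
		if (total_size[i] > free_blk || total_size[i] >= min_total)
			continue;
		if (total_size[i] < needed) {
			small = i;		/* keeps the last undersized fit */
		} else {
			chosen = i;
			min_total = total_size[i];
		}
	}
	return chosen >= 0 ? chosen : small;
}

int main(void)
{
	unsigned sizes[] = { 20, 48, 36, 96 };

	printf("need 32 -> move entry %d\n", pick_entry(sizes, 4, 32, 64));
	printf("need 80 -> move entry %d\n", pick_entry(sizes, 4, 80, 64));
	return 0;
}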
+
+
+
+/*
* ext4_xattr_delete_inode()
*
* Free extended attribute resources associated with this inode. This
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 79432b3..d7f5d6a 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -56,6 +56,13 @@ struct ext4_xattr_entry {
#define EXT4_XATTR_SIZE(size) \
(((size) + EXT4_XATTR_ROUND) & ~EXT4_XATTR_ROUND)
+#define IHDR(inode, raw_inode) \
+ ((struct ext4_xattr_ibody_header *) \
+ ((void *)raw_inode + \
+ EXT4_GOOD_OLD_INODE_SIZE + \
+ EXT4_I(inode)->i_extra_isize))
+#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+
# ifdef CONFIG_EXT4DEV_FS_XATTR
extern struct xattr_handler ext4_xattr_user_handler;
@@ -74,6 +81,9 @@ extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *,
extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
extern void ext4_xattr_put_super(struct super_block *);
+extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle);
+
extern int init_ext4_xattr(void);
extern void exit_ext4_xattr(void);
@@ -129,6 +139,13 @@ exit_ext4_xattr(void)
{
}
+static inline int
+ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle)
+{
+ return -EOPNOTSUPP;
+}
+
#define ext4_xattr_handlers NULL
# endif /* CONFIG_EXT4DEV_FS_XATTR */
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 3c9c8a1..be6f89b 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -48,7 +48,7 @@ int __init fat_cache_init(void)
fat_cache_cachep = kmem_cache_create("fat_cache",
sizeof(struct fat_cache),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (fat_cache_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0a7ddb3..4baa5f2 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -514,7 +514,7 @@ static int __init fat_init_inodecache(void)
sizeof(struct msdos_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (fat_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 3f22e9f..78b2ff0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -638,7 +638,7 @@ EXPORT_SYMBOL(kill_fasync);
static int __init fasync_init(void)
{
fasync_cache = kmem_cache_create("fasync_cache",
- sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
+ sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
return 0;
}
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 647d600..4f95572 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -263,8 +263,8 @@ vxfs_init(void)
int rv;
vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
- sizeof(struct vxfs_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ sizeof(struct vxfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!vxfs_inode_cachep)
return -ENOMEM;
rv = register_filesystem(&vxfs_fs_type);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a4b142a..8d23b0b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -14,6 +14,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 357764d..3ad22be 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1044,7 +1044,7 @@ int __init fuse_dev_init(void)
int err = -ENOMEM;
fuse_req_cachep = kmem_cache_create("fuse_request",
sizeof(struct fuse_req),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!fuse_req_cachep)
goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cc5efc1..5448f62 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -706,7 +706,7 @@ static int __init fuse_fs_init(void)
fuse_inode_cachep = kmem_cache_create("fuse_inode",
sizeof(struct fuse_inode),
0, SLAB_HWCACHE_ALIGN,
- fuse_inode_init_once, NULL);
+ fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
goto out_unreg2;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index cd805a6..93fa427 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -93,9 +93,10 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
map_bh(bh, inode->i_sb, block);
set_buffer_uptodate(bh);
+ if (!gfs2_is_jdata(ip))
+ mark_buffer_dirty(bh);
if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
gfs2_trans_add_bh(ip->i_gl, bh, 0);
- mark_buffer_dirty(bh);
if (release) {
unlock_page(page);
@@ -1085,6 +1086,33 @@ static int do_shrink(struct gfs2_inode *ip, u64 size)
return error;
}
+static int do_touch(struct gfs2_inode *ip, u64 size)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct buffer_head *dibh;
+ int error;
+
+ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ if (error)
+ return error;
+
+ down_write(&ip->i_rw_mutex);
+
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ goto do_touch_out;
+
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+
+do_touch_out:
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
+ return error;
+}
+
/**
* gfs2_truncatei - make a file a given size
* @ip: the inode
@@ -1105,8 +1133,11 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
if (size > ip->i_di.di_size)
error = do_grow(ip, size);
- else
+ else if (size < ip->i_di.di_size)
error = do_shrink(ip, size);
+ else
+ /* update time stamps */
+ error = do_touch(ip, size);
return error;
}
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
index 3548d9f..3731ab0 100644
--- a/fs/gfs2/daemon.c
+++ b/fs/gfs2/daemon.c
@@ -35,30 +35,6 @@
The kthread functions used to start these daemons block and flush signals. */
/**
- * gfs2_scand - Look for cached glocks and inodes to toss from memory
- * @sdp: Pointer to GFS2 superblock
- *
- * One of these daemons runs, finding candidates to add to sd_reclaim_list.
- * See gfs2_glockd()
- */
-
-int gfs2_scand(void *data)
-{
- struct gfs2_sbd *sdp = data;
- unsigned long t;
-
- while (!kthread_should_stop()) {
- gfs2_scand_internal(sdp);
- t = gfs2_tune_get(sdp, gt_scand_secs) * HZ;
- if (freezing(current))
- refrigerator();
- schedule_timeout_interruptible(t);
- }
-
- return 0;
-}
-
-/**
* gfs2_glockd - Reclaim unused glock structures
* @sdp: Pointer to GFS2 superblock
*
diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h
index 80100712..0de9b35 100644
--- a/fs/gfs2/daemon.h
+++ b/fs/gfs2/daemon.h
@@ -10,7 +10,6 @@
#ifndef __DAEMON_DOT_H__
#define __DAEMON_DOT_H__
-int gfs2_scand(void *data);
int gfs2_glockd(void *data);
int gfs2_recoverd(void *data);
int gfs2_logd(void *data);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 2beb2f4..9949bb7 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1043,6 +1043,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
error = gfs2_meta_inode_buffer(dip, &dibh);
if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
+ gfs2_trans_add_bh(dip->i_gl, dibh, 1);
dip->i_di.di_blocks++;
gfs2_set_inode_blocks(&dip->i_inode);
gfs2_dinode_out(dip, dibh->b_data);
@@ -1501,7 +1502,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino));
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index 1ab3e9d..aa8dbf3 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -200,28 +200,28 @@ static int security_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
return gfs2_ea_remove_i(ip, er);
}
-static struct gfs2_eattr_operations gfs2_user_eaops = {
+static const struct gfs2_eattr_operations gfs2_user_eaops = {
.eo_get = user_eo_get,
.eo_set = user_eo_set,
.eo_remove = user_eo_remove,
.eo_name = "user",
};
-struct gfs2_eattr_operations gfs2_system_eaops = {
+const struct gfs2_eattr_operations gfs2_system_eaops = {
.eo_get = system_eo_get,
.eo_set = system_eo_set,
.eo_remove = system_eo_remove,
.eo_name = "system",
};
-static struct gfs2_eattr_operations gfs2_security_eaops = {
+static const struct gfs2_eattr_operations gfs2_security_eaops = {
.eo_get = security_eo_get,
.eo_set = security_eo_set,
.eo_remove = security_eo_remove,
.eo_name = "security",
};
-struct gfs2_eattr_operations *gfs2_ea_ops[] = {
+const struct gfs2_eattr_operations *gfs2_ea_ops[] = {
NULL,
&gfs2_user_eaops,
&gfs2_system_eaops,
diff --git a/fs/gfs2/eaops.h b/fs/gfs2/eaops.h
index 508b4f7..da2f7fb 100644
--- a/fs/gfs2/eaops.h
+++ b/fs/gfs2/eaops.h
@@ -22,9 +22,9 @@ struct gfs2_eattr_operations {
unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name);
-extern struct gfs2_eattr_operations gfs2_system_eaops;
+extern const struct gfs2_eattr_operations gfs2_system_eaops;
-extern struct gfs2_eattr_operations *gfs2_ea_ops[];
+extern const struct gfs2_eattr_operations *gfs2_ea_ops[];
#endif /* __EAOPS_DOT_H__ */
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3f0974e..a37efe4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -25,8 +25,10 @@
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
#include "gfs2.h"
#include "incore.h"
@@ -48,7 +50,6 @@ struct glock_iter {
int hash; /* hash bucket index */
struct gfs2_sbd *sdp; /* incore superblock */
struct gfs2_glock *gl; /* current glock struct */
- struct hlist_head *hb_list; /* current hash bucket ptr */
struct seq_file *seq; /* sequence file for debugfs */
char string[512]; /* scratch space */
};
@@ -59,8 +60,13 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
+static void run_queue(struct gfs2_glock *gl);
+
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
+static struct task_struct *scand_process;
+static unsigned int scand_secs = 5;
+static struct workqueue_struct *glock_workqueue;
#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -276,6 +282,18 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
return gl;
}
+static void glock_work_func(struct work_struct *work)
+{
+ struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+
+ spin_lock(&gl->gl_spin);
+ if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
+ set_bit(GLF_DEMOTE, &gl->gl_flags);
+ run_queue(gl);
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_put(gl);
+}
+
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
@@ -315,6 +333,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
gl->gl_state = LM_ST_UNLOCKED;
+ gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_hash = hash;
gl->gl_owner_pid = 0;
gl->gl_ip = 0;
@@ -323,10 +342,12 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_req_bh = NULL;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
+ gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
gl->gl_aspace = NULL;
lops_init_le(&gl->gl_le, &gfs2_glock_lops);
+ INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
/* If this glock protects actual on-disk data or metadata blocks,
create a VFS inode to manage the pages/buffers holding them. */
@@ -440,6 +461,8 @@ static void wait_on_holder(struct gfs2_holder *gh)
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
+ BUG_ON(!spin_is_locked(&gl->gl_spin));
+ gl->gl_demote_state = LM_ST_EXCLUSIVE;
clear_bit(GLF_DEMOTE, &gl->gl_flags);
smp_mb__after_clear_bit();
wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
@@ -545,12 +568,14 @@ static int rq_demote(struct gfs2_glock *gl)
return 0;
}
set_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
if (gl->gl_demote_state == LM_ST_UNLOCKED ||
- gl->gl_state != LM_ST_EXCLUSIVE)
+ gl->gl_state != LM_ST_EXCLUSIVE) {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_drop_th(gl);
- else
+ } else {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_xmote_th(gl, NULL);
+ }
spin_lock(&gl->gl_spin);
return 0;
@@ -679,24 +704,25 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
* practise: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
+static void handle_callback(struct gfs2_glock *gl, unsigned int state,
+ int remote, unsigned long delay)
{
+ int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
+
spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
+ set_bit(bit, &gl->gl_flags);
+ if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
gl->gl_object) {
- struct inode *inode = igrab(gl->gl_object);
+ gfs2_glock_schedule_for_reclaim(gl);
spin_unlock(&gl->gl_spin);
- if (inode) {
- d_prune_aliases(inode);
- iput(inode);
- }
return;
}
- } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
- gl->gl_demote_state = state;
+ } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state != state) {
+ gl->gl_demote_state = LM_ST_UNLOCKED;
}
spin_unlock(&gl->gl_spin);
}
@@ -723,6 +749,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
}
gl->gl_state = new_state;
+ gl->gl_tchange = jiffies;
}
/**
@@ -760,10 +787,20 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
if (!gh) {
gl->gl_stamp = jiffies;
- if (ret & LM_OUT_CANCELED)
+ if (ret & LM_OUT_CANCELED) {
op_done = 0;
- else
+ } else {
+ spin_lock(&gl->gl_spin);
+ if (gl->gl_state != gl->gl_demote_state) {
+ gl->gl_req_bh = NULL;
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_drop_th(gl);
+ gfs2_glock_put(gl);
+ return;
+ }
gfs2_demote_wake(gl);
+ spin_unlock(&gl->gl_spin);
+ }
} else {
spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
@@ -799,7 +836,6 @@ out:
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
}
@@ -817,7 +853,7 @@ out:
*
*/
-void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
+static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
int flags = gh ? gh->gh_flags : 0;
@@ -871,7 +907,6 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
gfs2_assert_warn(sdp, !ret);
state_change(gl, LM_ST_UNLOCKED);
- gfs2_demote_wake(gl);
if (glops->go_inval)
glops->go_inval(gl, DIO_METADATA);
@@ -884,10 +919,10 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
}
spin_lock(&gl->gl_spin);
+ gfs2_demote_wake(gl);
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
@@ -1067,24 +1102,31 @@ static void add_to_queue(struct gfs2_holder *gh)
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
BUG();
- existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
- printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- gl->gl_name.ln_type, gl->gl_state);
- BUG();
- }
-
- existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- BUG();
+ if (!(gh->gh_flags & GL_FLOCK)) {
+ existing = find_holder_by_owner(&gl->gl_holders,
+ gh->gh_owner_pid);
+ if (existing) {
+ print_symbol(KERN_WARNING "original: %s\n",
+ existing->gh_ip);
+ printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
+ existing->gh_gl->gl_name.ln_type,
+ existing->gh_gl->gl_state);
+ print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+ printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
+ printk(KERN_INFO "lock type : %d lock state : %d\n",
+ gl->gl_name.ln_type, gl->gl_state);
+ BUG();
+ }
+
+ existing = find_holder_by_owner(&gl->gl_waiters3,
+ gh->gh_owner_pid);
+ if (existing) {
+ print_symbol(KERN_WARNING "original: %s\n",
+ existing->gh_ip);
+ print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
+ BUG();
+ }
}
if (gh->gh_flags & LM_FLAG_PRIORITY)
@@ -1195,9 +1237,10 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
+ unsigned delay = 0;
if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_lock(gl);
@@ -1215,8 +1258,14 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
}
clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
spin_unlock(&gl->gl_spin);
+
+ gfs2_glock_hold(gl);
+ if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE, &gl->gl_flags))
+ delay = gl->gl_ops->go_min_hold_time;
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
}
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
@@ -1443,18 +1492,21 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
unsigned int state)
{
struct gfs2_glock *gl;
+ unsigned long delay = 0;
+ unsigned long holdtime;
+ unsigned long now = jiffies;
gl = gfs2_glock_find(sdp, name);
if (!gl)
return;
- handle_callback(gl, state, 1);
-
- spin_lock(&gl->gl_spin);
- run_queue(gl);
- spin_unlock(&gl->gl_spin);
+ holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
- gfs2_glock_put(gl);
+ handle_callback(gl, state, 1, delay);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
}
/**
@@ -1495,7 +1547,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
return;
if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
gl->gl_req_bh(gl, async->lc_ret);
- gfs2_glock_put(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
up_read(&gfs2_umount_flush_sem);
return;
}
@@ -1588,7 +1641,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_unlock(gl);
}
@@ -1617,7 +1670,7 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
goto out;
gl = list_entry(head->first, struct gfs2_glock, gl_list);
while(1) {
- if (gl->gl_sbd == sdp) {
+ if (!sdp || gl->gl_sbd == sdp) {
gfs2_glock_hold(gl);
read_unlock(gl_lock_addr(hash));
if (prev)
@@ -1635,6 +1688,7 @@ out:
read_unlock(gl_lock_addr(hash));
if (prev)
gfs2_glock_put(prev);
+ cond_resched();
return has_entries;
}
@@ -1663,20 +1717,6 @@ out_schedule:
}
/**
- * gfs2_scand_internal - Look for glocks and inodes to toss from memory
- * @sdp: the filesystem
- *
- */
-
-void gfs2_scand_internal(struct gfs2_sbd *sdp)
-{
- unsigned int x;
-
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- examine_bucket(scan_glock, sdp, x);
-}
-
-/**
* clear_glock - look at a glock and see if we can free it from glock cache
* @gl: the glock to look at
*
@@ -1701,7 +1741,7 @@ static void clear_glock(struct gfs2_glock *gl)
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0);
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
gfs2_glmutex_unlock(gl);
}
}
@@ -1843,7 +1883,7 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
spin_lock(&gl->gl_spin);
- print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
+ print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number);
print_dbg(gi, " gl_flags =");
for (x = 0; x < 32; x++) {
@@ -1963,6 +2003,35 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
return error;
}
+/**
+ * gfs2_scand - Look for cached glocks and inodes to toss from memory
+ * @sdp: Pointer to GFS2 superblock
+ *
+ * One of these daemons runs, finding candidates to add to sd_reclaim_list.
+ * See gfs2_glockd()
+ */
+
+static int gfs2_scand(void *data)
+{
+ unsigned x;
+ unsigned delay;
+
+ while (!kthread_should_stop()) {
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(scan_glock, NULL, x);
+ if (freezing(current))
+ refrigerator();
+ delay = scand_secs;
+ if (delay < 1)
+ delay = 1;
+ schedule_timeout_interruptible(delay * HZ);
+ }
+
+ return 0;
+}
+
+
+
int __init gfs2_glock_init(void)
{
unsigned i;
@@ -1974,52 +2043,69 @@ int __init gfs2_glock_init(void)
rwlock_init(&gl_hash_locks[i]);
}
#endif
+
+ scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
+ if (IS_ERR(scand_process))
+ return PTR_ERR(scand_process);
+
+ glock_workqueue = create_workqueue("glock_workqueue");
+ if (IS_ERR(glock_workqueue)) {
+ kthread_stop(scand_process);
+ return PTR_ERR(glock_workqueue);
+ }
+
return 0;
}
+void gfs2_glock_exit(void)
+{
+ destroy_workqueue(glock_workqueue);
+ kthread_stop(scand_process);
+}
+
+module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
+
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
+ struct gfs2_glock *gl;
+
+restart:
read_lock(gl_lock_addr(gi->hash));
- while (1) {
- if (!gi->hb_list) { /* If we don't have a hash bucket yet */
- gi->hb_list = &gl_hash_table[gi->hash].hb_list;
- if (hlist_empty(gi->hb_list)) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- if (gi->hash >= GFS2_GL_HASH_SIZE) {
- read_unlock(gl_lock_addr(gi->hash));
- return 1;
- }
- else
- continue;
- }
- if (!hlist_empty(gi->hb_list)) {
- gi->gl = list_entry(gi->hb_list->first,
- struct gfs2_glock,
- gl_list);
- }
- } else {
- if (gi->gl->gl_list.next == NULL) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- continue;
- }
- gi->gl = list_entry(gi->gl->gl_list.next,
- struct gfs2_glock, gl_list);
- }
+ gl = gi->gl;
+ if (gl) {
+ gi->gl = hlist_entry(gl->gl_list.next,
+ struct gfs2_glock, gl_list);
if (gi->gl)
- break;
+ gfs2_glock_hold(gi->gl);
}
read_unlock(gl_lock_addr(gi->hash));
+ if (gl)
+ gfs2_glock_put(gl);
+ if (gl && gi->gl == NULL)
+ gi->hash++;
+ while(gi->gl == NULL) {
+ if (gi->hash >= GFS2_GL_HASH_SIZE)
+ return 1;
+ read_lock(gl_lock_addr(gi->hash));
+ gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
+ struct gfs2_glock, gl_list);
+ if (gi->gl)
+ gfs2_glock_hold(gi->gl);
+ read_unlock(gl_lock_addr(gi->hash));
+ gi->hash++;
+ }
+
+ if (gi->sdp != gi->gl->gl_sbd)
+ goto restart;
+
return 0;
}
static void gfs2_glock_iter_free(struct glock_iter *gi)
{
+ if (gi->gl)
+ gfs2_glock_put(gi->gl);
kfree(gi);
}
@@ -2033,9 +2119,8 @@ static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
gi->sdp = sdp;
gi->hash = 0;
- gi->gl = NULL;
- gi->hb_list = NULL;
gi->seq = NULL;
+ gi->gl = NULL;
memset(gi->string, 0, sizeof(gi->string));
if (gfs2_glock_iter_next(gi)) {
@@ -2055,7 +2140,7 @@ static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
if (!gi)
return NULL;
- while (n--) {
+ while(n--) {
if (gfs2_glock_iter_next(gi)) {
gfs2_glock_iter_free(gi);
return NULL;
@@ -2082,7 +2167,9 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
- /* nothing for now */
+ struct glock_iter *gi = iter_ptr;
+ if (gi)
+ gfs2_glock_iter_free(gi);
}
static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
@@ -2095,7 +2182,7 @@ static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static struct seq_operations gfs2_glock_seq_ops = {
+static const struct seq_operations gfs2_glock_seq_ops = {
.start = gfs2_glock_seq_start,
.next = gfs2_glock_seq_next,
.stop = gfs2_glock_seq_stop,
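
For reference, a minimal sketch of the deferred-demote pattern glock.c adopts above: a delayed_work item queued on a private workqueue, with the delay derived from a minimum hold time via jiffies arithmetic. The demo_* identifiers and the standalone-module form are illustrative assumptions only, not GFS2 code.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;
static unsigned long demo_tchange;                  /* jiffies of last state change */
static const unsigned long demo_min_hold = HZ / 10; /* minimum hold time */

static void demo_work_func(struct work_struct *work)
{
	/* In GFS2 this is where a pending demote would be acted upon. */
	printk(KERN_INFO "demo: deferred demote work ran\n");
}

static int __init demo_init(void)
{
	unsigned long now = jiffies;
	unsigned long holdtime;
	unsigned long delay = 0;

	demo_wq = create_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_work, demo_work_func);
	demo_tchange = now;

	/* Honour the minimum hold time before letting the work run. */
	holdtime = demo_tchange + demo_min_hold;
	if (time_before(now, holdtime))
		delay = holdtime - now;
	queue_delayed_work(demo_wq, &demo_work, delay);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work(&demo_work);
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");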
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 7721ca3..b16f604 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -26,6 +26,7 @@
#define GL_SKIP 0x00000100
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400
+#define GL_FLOCK 0x00000800
#define GL_NOCANCEL 0x00001000
#define GLR_TRYFAILED 13
@@ -132,11 +133,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
-
-void gfs2_scand_internal(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
int __init gfs2_glock_init(void);
+void gfs2_glock_exit(void);
+
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
int gfs2_register_debugfs(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 777ca46..4670dcb 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -41,7 +41,6 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
struct list_head *head = &gl->gl_ail_list;
struct gfs2_bufdata *bd;
struct buffer_head *bh;
- u64 blkno;
int error;
blocks = atomic_read(&gl->gl_ail_count);
@@ -57,19 +56,12 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
bd = list_entry(head->next, struct gfs2_bufdata,
bd_ail_gl_list);
bh = bd->bd_bh;
- blkno = bh->b_blocknr;
+ gfs2_remove_from_ail(NULL, bd);
+ bd->bd_bh = NULL;
+ bh->b_private = NULL;
+ bd->bd_blkno = bh->b_blocknr;
gfs2_assert_withdraw(sdp, !buffer_busy(bh));
-
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&gl->gl_ail_count);
- brelse(bh);
- gfs2_log_unlock(sdp);
-
- gfs2_trans_add_revoke(sdp, blkno);
-
- gfs2_log_lock(sdp);
+ gfs2_trans_add_revoke(sdp, bd);
}
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
gfs2_log_unlock(sdp);
@@ -156,9 +148,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
ip = NULL;
if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
- if (ip)
+ if (ip && !gfs2_is_jdata(ip))
filemap_fdatawrite(ip->i_inode.i_mapping);
gfs2_log_flush(gl->gl_sbd, gl);
+ if (ip && gfs2_is_jdata(ip))
+ filemap_fdatawrite(ip->i_inode.i_mapping);
gfs2_meta_sync(gl);
if (ip) {
struct address_space *mapping = ip->i_inode.i_mapping;
@@ -452,6 +446,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_lock = inode_go_lock,
.go_unlock = inode_go_unlock,
.go_type = LM_TYPE_INODE,
+ .go_min_hold_time = HZ / 10,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -462,6 +457,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
.go_type = LM_TYPE_RGRP,
+ .go_min_hold_time = HZ / 10,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 170ba93..eaddfb5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -11,6 +11,7 @@
#define __INCORE_DOT_H__
#include <linux/fs.h>
+#include <linux/workqueue.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -113,7 +114,13 @@ struct gfs2_bufdata {
struct buffer_head *bd_bh;
struct gfs2_glock *bd_gl;
- struct list_head bd_list_tr;
+ union {
+ struct list_head list_tr;
+ u64 blkno;
+ } u;
+#define bd_list_tr u.list_tr
+#define bd_blkno u.blkno
+
struct gfs2_log_element bd_le;
struct gfs2_ail *bd_ail;
@@ -130,6 +137,7 @@ struct gfs2_glock_operations {
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
const int go_type;
+ const unsigned long go_min_hold_time;
};
enum {
@@ -161,6 +169,7 @@ enum {
GLF_LOCK = 1,
GLF_STICKY = 2,
GLF_DEMOTE = 3,
+ GLF_PENDING_DEMOTE = 4,
GLF_DIRTY = 5,
};
@@ -193,6 +202,7 @@ struct gfs2_glock {
u64 gl_vn;
unsigned long gl_stamp;
+ unsigned long gl_tchange;
void *gl_object;
struct list_head gl_reclaim;
@@ -203,6 +213,7 @@ struct gfs2_glock {
struct gfs2_log_element gl_le;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
+ struct delayed_work gl_work;
};
struct gfs2_alloc {
@@ -293,11 +304,6 @@ struct gfs2_file {
struct gfs2_holder f_fl_gh;
};
-struct gfs2_revoke {
- struct gfs2_log_element rv_le;
- u64 rv_blkno;
-};
-
struct gfs2_revoke_replay {
struct list_head rr_list;
u64 rr_blkno;
@@ -335,12 +341,6 @@ struct gfs2_quota_data {
unsigned long qd_last_touched;
};
-struct gfs2_log_buf {
- struct list_head lb_list;
- struct buffer_head *lb_bh;
- struct buffer_head *lb_real;
-};
-
struct gfs2_trans {
unsigned long tr_ip;
@@ -429,7 +429,6 @@ struct gfs2_tune {
unsigned int gt_log_flush_secs;
unsigned int gt_jindex_refresh_secs; /* Check for new journal index */
- unsigned int gt_scand_secs;
unsigned int gt_recoverd_secs;
unsigned int gt_logd_secs;
unsigned int gt_quotad_secs;
@@ -574,7 +573,6 @@ struct gfs2_sbd {
/* Daemon stuff */
- struct task_struct *sd_scand_process;
struct task_struct *sd_recoverd_process;
struct task_struct *sd_logd_process;
struct task_struct *sd_quotad_process;
@@ -609,13 +607,13 @@ struct gfs2_sbd {
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
unsigned int sd_log_num_databuf;
- unsigned int sd_log_num_jdata;
struct list_head sd_log_le_gl;
struct list_head sd_log_le_buf;
struct list_head sd_log_le_revoke;
struct list_head sd_log_le_rg;
struct list_head sd_log_le_databuf;
+ struct list_head sd_log_le_ordered;
unsigned int sd_log_blks_free;
struct mutex sd_log_reserve_mutex;
@@ -627,7 +625,8 @@ struct gfs2_sbd {
unsigned long sd_log_flush_time;
struct rw_semaphore sd_log_flush_lock;
- struct list_head sd_log_flush_list;
+ atomic_t sd_log_in_flight;
+ wait_queue_head_t sd_log_flush_wait;
unsigned int sd_log_flush_head;
u64 sd_log_flush_wrapped;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 34f7bcd..5f6dc32 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -77,6 +77,49 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}
+struct gfs2_skip_data {
+ u64 no_addr;
+ int skipped;
+};
+
+static int iget_skip_test(struct inode *inode, void *opaque)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_skip_data *data = opaque;
+
+ if (ip->i_no_addr == data->no_addr && inode->i_private != NULL){
+ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
+ data->skipped = 1;
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int iget_skip_set(struct inode *inode, void *opaque)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_skip_data *data = opaque;
+
+ if (data->skipped)
+ return 1;
+ inode->i_ino = (unsigned long)(data->no_addr);
+ ip->i_no_addr = data->no_addr;
+ return 0;
+}
+
+static struct inode *gfs2_iget_skip(struct super_block *sb,
+ u64 no_addr)
+{
+ struct gfs2_skip_data data;
+ unsigned long hash = (unsigned long)no_addr;
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
+}
+
/**
* GFS2 lookup code fills in vfs inode contents based on info obtained
* from directory entry inside gfs2_inode_lookup(). This has caused issues
@@ -112,6 +155,7 @@ void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
+ * @skip_freeing: set this to not return an inode if it is currently being freed.
*
* Returns: A VFS inode, or an error
*/
@@ -119,13 +163,19 @@ void gfs2_set_iop(struct inode *inode)
struct inode *gfs2_inode_lookup(struct super_block *sb,
unsigned int type,
u64 no_addr,
- u64 no_formal_ino)
+ u64 no_formal_ino, int skip_freeing)
{
- struct inode *inode = gfs2_iget(sb, no_addr);
- struct gfs2_inode *ip = GFS2_I(inode);
+ struct inode *inode;
+ struct gfs2_inode *ip;
struct gfs2_glock *io_gl;
int error;
+ if (skip_freeing)
+ inode = gfs2_iget_skip(sb, no_addr);
+ else
+ inode = gfs2_iget(sb, no_addr);
+ ip = GFS2_I(inode);
+
if (!inode)
return ERR_PTR(-ENOBUFS);
@@ -244,6 +294,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
return 0;
}
+static void gfs2_inode_bh(struct gfs2_inode *ip, struct buffer_head *bh)
+{
+ ip->i_cache[0] = bh;
+}
+
/**
* gfs2_inode_refresh - Refresh the incore copy of the dinode
* @ip: The GFS2 inode
@@ -688,7 +743,7 @@ out:
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
const struct gfs2_inum_host *inum, unsigned int mode,
unsigned int uid, unsigned int gid,
- const u64 *generation, dev_t dev)
+ const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_dinode *di;
@@ -743,13 +798,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
memset(&di->di_reserved, 0, sizeof(di->di_reserved));
+
+ set_buffer_uptodate(dibh);
- brelse(dibh);
+ *bhp = dibh;
}
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
unsigned int mode, const struct gfs2_inum_host *inum,
- const u64 *generation, dev_t dev)
+ const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
unsigned int uid, gid;
@@ -770,7 +827,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
if (error)
goto out_quota;
- init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
+ init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
gfs2_quota_change(dip, +1, uid, gid);
gfs2_trans_end(sdp);
@@ -909,6 +966,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
int error;
u64 generation;
+ struct buffer_head *bh=NULL;
if (!name->len || name->len > GFS2_FNAMESIZE)
return ERR_PTR(-ENAMETOOLONG);
@@ -935,16 +993,18 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error)
goto fail_gunlock;
- error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
+ error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
if (error)
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
inum.no_addr,
- inum.no_formal_ino);
+ inum.no_formal_ino, 0);
if (IS_ERR(inode))
goto fail_gunlock2;
+ gfs2_inode_bh(GFS2_I(inode), bh);
+
error = gfs2_inode_refresh(GFS2_I(inode));
if (error)
goto fail_gunlock2;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 4517ac8..351ac87 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -49,7 +49,8 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_set_iop(struct inode *inode);
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ int skip_freeing);
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 24d70f7..9e8265d 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/list.h>
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
index fba1f1d..1f7b038 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -346,15 +346,16 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
+ unsigned int mask = 0;
+
poll_wait(file, &send_wq, wait);
spin_lock(&ops_lock);
- if (!list_empty(&send_list)) {
- spin_unlock(&ops_lock);
- return POLLIN | POLLRDNORM;
- }
+ if (!list_empty(&send_list))
+ mask = POLLIN | POLLRDNORM;
spin_unlock(&ops_lock);
- return 0;
+
+ return mask;
}
static const struct file_operations dev_fops = {
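
A minimal sketch of the single-exit poll pattern dev_poll() now follows: compute the mask while holding the lock, unlock once, return once. The demo_* names and the misc-device wrapper are illustrative assumptions, not part of lock_dlm.

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>

static LIST_HEAD(demo_send_list);
static DEFINE_SPINLOCK(demo_ops_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_send_wq);

static unsigned int demo_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &demo_send_wq, wait);

	/* Build the mask under the lock; single unlock, single return. */
	spin_lock(&demo_ops_lock);
	if (!list_empty(&demo_send_list))
		mask = POLLIN | POLLRDNORM;
	spin_unlock(&demo_ops_lock);

	return mask;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.poll  = demo_poll,
};

static struct miscdevice demo_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "demo_poll",
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_misc);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_misc);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");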
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index d9fe3ca..ae9e6a2 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -190,7 +190,6 @@ static struct kobj_type gdlm_ktype = {
};
static struct kset gdlm_kset = {
- .kobj = {.name = "lock_dlm",},
.ktype = &gdlm_ktype,
};
@@ -224,6 +223,7 @@ int gdlm_sysfs_init(void)
{
int error;
+ kobject_set_name(&gdlm_kset.kobj, "lock_dlm");
kobj_set_kset_s(&gdlm_kset, kernel_subsys);
error = kset_register(&gdlm_kset);
if (error)
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index 1aca51e..bd938f0 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -268,20 +268,16 @@ static inline int check_drop(struct gdlm_ls *ls)
return 0;
}
-static int gdlm_thread(void *data)
+static int gdlm_thread(void *data, int blist)
{
struct gdlm_ls *ls = (struct gdlm_ls *) data;
struct gdlm_lock *lp = NULL;
- int blist = 0;
uint8_t complete, blocking, submit, drop;
DECLARE_WAITQUEUE(wait, current);
/* Only thread1 is allowed to do blocking callbacks since gfs
may wait for a completion callback within a blocking cb. */
- if (current == ls->thread1)
- blist = 1;
-
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ls->thread_wait, &wait);
@@ -333,12 +329,22 @@ static int gdlm_thread(void *data)
return 0;
}
+static int gdlm_thread1(void *data)
+{
+ return gdlm_thread(data, 1);
+}
+
+static int gdlm_thread2(void *data)
+{
+ return gdlm_thread(data, 0);
+}
+
int gdlm_init_threads(struct gdlm_ls *ls)
{
struct task_struct *p;
int error;
- p = kthread_run(gdlm_thread, ls, "lock_dlm1");
+ p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm1 thread %d", error);
@@ -346,7 +352,7 @@ int gdlm_init_threads(struct gdlm_ls *ls)
}
ls->thread1 = p;
- p = kthread_run(gdlm_thread, ls, "lock_dlm2");
+ p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm2 thread %d", error);
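
A minimal sketch of the two-entry-point kthread pattern used above: kthread_run() takes an int (*)(void *), so a shared worker needing an extra flag is wrapped by two thin entry functions. The demo_* names are illustrative assumptions, not part of lock_dlm.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task1;
static struct task_struct *demo_task2;

/* Shared worker; 'blocking' selects which duties this instance takes on. */
static int demo_thread(void *data, int blocking)
{
	while (!kthread_should_stop()) {
		if (blocking)
			printk(KERN_INFO "demo: blocking-callback thread tick\n");
		else
			printk(KERN_INFO "demo: completion-only thread tick\n");
		msleep_interruptible(1000);
	}
	return 0;
}

/* Thin wrappers give kthread_run() the signature it expects. */
static int demo_thread1(void *data)
{
	return demo_thread(data, 1);
}

static int demo_thread2(void *data)
{
	return demo_thread(data, 0);
}

static int __init demo_init(void)
{
	demo_task1 = kthread_run(demo_thread1, NULL, "demo_thread1");
	if (IS_ERR(demo_task1))
		return PTR_ERR(demo_task1);

	demo_task2 = kthread_run(demo_thread2, NULL, "demo_thread2");
	if (IS_ERR(demo_task2)) {
		kthread_stop(demo_task1);
		return PTR_ERR(demo_task2);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task2);
	kthread_stop(demo_task1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");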
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
index 0d149c8c..d3b8ce6 100644
--- a/fs/gfs2/locking/nolock/main.c
+++ b/fs/gfs2/locking/nolock/main.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f49a12e..7df7024 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -60,6 +60,26 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
}
/**
+ * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
+ * @mapping: The associated mapping (may be NULL)
+ * @bd: The gfs2_bufdata to remove
+ *
+ * The log lock _must_ be held when calling this function
+ *
+ */
+
+void gfs2_remove_from_ail(struct address_space *mapping, struct gfs2_bufdata *bd)
+{
+ bd->bd_ail = NULL;
+ list_del_init(&bd->bd_ail_st_list);
+ list_del_init(&bd->bd_ail_gl_list);
+ atomic_dec(&bd->bd_gl->gl_ail_count);
+ if (mapping)
+ gfs2_meta_cache_flush(GFS2_I(mapping->host));
+ brelse(bd->bd_bh);
+}
+
+/**
* gfs2_ail1_start_one - Start I/O on a part of the AIL
* @sdp: the filesystem
* @tr: the part of the AIL
@@ -83,17 +103,9 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
gfs2_assert(sdp, bd->bd_ail == ai);
- if (!bh){
- list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
- continue;
- }
-
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh)) {
- gfs2_log_unlock(sdp);
+ if (!buffer_uptodate(bh))
gfs2_io_error_bh(sdp, bh);
- gfs2_log_lock(sdp);
- }
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
continue;
}
@@ -103,9 +115,16 @@ static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+ get_bh(bh);
gfs2_log_unlock(sdp);
- wait_on_buffer(bh);
- ll_rw_block(WRITE, 1, &bh);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
+ bh->b_end_io = end_buffer_write_sync;
+ submit_bh(WRITE, bh);
+ } else {
+ unlock_buffer(bh);
+ brelse(bh);
+ }
gfs2_log_lock(sdp);
retry = 1;
@@ -130,11 +149,6 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
bd_ail_st_list) {
bh = bd->bd_bh;
- if (!bh){
- list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
- continue;
- }
-
gfs2_assert(sdp, bd->bd_ail == ai);
if (buffer_busy(bh)) {
@@ -155,13 +169,14 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
- struct list_head *head = &sdp->sd_ail1_list;
+ struct list_head *head;
u64 sync_gen;
struct list_head *first;
struct gfs2_ail *first_ai, *ai, *tmp;
int done = 0;
gfs2_log_lock(sdp);
+ head = &sdp->sd_ail1_list;
if (list_empty(head)) {
gfs2_log_unlock(sdp);
return;
@@ -233,11 +248,7 @@ static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
bd = list_entry(head->prev, struct gfs2_bufdata,
bd_ail_st_list);
gfs2_assert(sdp, bd->bd_ail == ai);
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&bd->bd_gl->gl_ail_count);
- brelse(bd->bd_bh);
+ gfs2_remove_from_ail(bd->bd_bh->b_page->mapping, bd);
}
}
@@ -439,10 +450,10 @@ static unsigned int current_tail(struct gfs2_sbd *sdp)
return tail;
}
-static inline void log_incr_head(struct gfs2_sbd *sdp)
+void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
if (sdp->sd_log_flush_head == sdp->sd_log_tail)
- gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);
+ BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);
if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
sdp->sd_log_flush_head = 0;
@@ -451,6 +462,23 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
}
/**
+ * gfs2_log_write_endio - End of I/O for a log buffer
+ * @bh: The buffer head
+ * @uptodate: I/O Status
+ *
+ */
+
+static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
+{
+ struct gfs2_sbd *sdp = bh->b_private;
+ bh->b_private = NULL;
+
+ end_buffer_write_sync(bh, uptodate);
+ if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+ wake_up(&sdp->sd_log_flush_wait);
+}
+
+/**
* gfs2_log_get_buf - Get and initialize a buffer to use for log control data
* @sdp: The GFS2 superblock
*
@@ -460,25 +488,43 @@ static inline void log_incr_head(struct gfs2_sbd *sdp)
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
- struct gfs2_log_buf *lb;
struct buffer_head *bh;
- lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
- list_add(&lb->lb_list, &sdp->sd_log_flush_list);
-
- bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
+ bh = sb_getblk(sdp->sd_vfs, blkno);
lock_buffer(bh);
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
- unlock_buffer(bh);
-
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
+ atomic_inc(&sdp->sd_log_in_flight);
+ bh->b_private = sdp;
+ bh->b_end_io = gfs2_log_write_endio;
return bh;
}
/**
+ * gfs2_fake_write_endio - End of I/O for a fake log buffer
+ * @bh: The buffer head
+ * @uptodate: The I/O Status
+ *
+ */
+
+static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
+{
+ struct buffer_head *real_bh = bh->b_private;
+ struct gfs2_bufdata *bd = real_bh->b_private;
+ struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;
+
+ end_buffer_write_sync(bh, uptodate);
+ free_buffer_head(bh);
+ unlock_buffer(real_bh);
+ brelse(real_bh);
+ if (atomic_dec_and_test(&sdp->sd_log_in_flight))
+ wake_up(&sdp->sd_log_flush_wait);
+}
+
+/**
* gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
* @sdp: the filesystem
* @data: the data the buffer_head should point to
@@ -490,22 +536,20 @@ struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
struct buffer_head *real)
{
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
- struct gfs2_log_buf *lb;
struct buffer_head *bh;
- lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
- list_add(&lb->lb_list, &sdp->sd_log_flush_list);
- lb->lb_real = real;
-
- bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
+ bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
atomic_set(&bh->b_count, 1);
- bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
set_bh_page(bh, real->b_page, bh_offset(real));
bh->b_blocknr = blkno;
bh->b_size = sdp->sd_sb.sb_bsize;
bh->b_bdev = sdp->sd_vfs->s_bdev;
+ bh->b_private = real;
+ bh->b_end_io = gfs2_fake_write_endio;
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
+ atomic_inc(&sdp->sd_log_in_flight);
return bh;
}
@@ -572,45 +616,75 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
gfs2_assert_withdraw(sdp, !pull);
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
- log_incr_head(sdp);
+ gfs2_log_incr_head(sdp);
}
static void log_flush_commit(struct gfs2_sbd *sdp)
{
- struct list_head *head = &sdp->sd_log_flush_list;
- struct gfs2_log_buf *lb;
- struct buffer_head *bh;
- int flushcount = 0;
+ DEFINE_WAIT(wait);
+
+ if (atomic_read(&sdp->sd_log_in_flight)) {
+ do {
+ prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&sdp->sd_log_in_flight))
+ io_schedule();
+ } while(atomic_read(&sdp->sd_log_in_flight));
+ finish_wait(&sdp->sd_log_flush_wait, &wait);
+ }
- while (!list_empty(head)) {
- lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
- list_del(&lb->lb_list);
- bh = lb->lb_bh;
+ log_write_header(sdp, 0, 0);
+}
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
- if (lb->lb_real) {
- while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
- schedule();
- free_buffer_head(bh);
- } else
+static void gfs2_ordered_write(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd;
+ struct buffer_head *bh;
+ LIST_HEAD(written);
+
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_ordered)) {
+ bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
+ list_move(&bd->bd_le.le_list, &written);
+ bh = bd->bd_bh;
+ if (!buffer_dirty(bh))
+ continue;
+ get_bh(bh);
+ gfs2_log_unlock(sdp);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
+ bh->b_end_io = end_buffer_write_sync;
+ submit_bh(WRITE, bh);
+ } else {
+ unlock_buffer(bh);
brelse(bh);
- kfree(lb);
- flushcount++;
+ }
+ gfs2_log_lock(sdp);
}
+ list_splice(&written, &sdp->sd_log_le_ordered);
+ gfs2_log_unlock(sdp);
+}
- /* If nothing was journaled, the header is unplanned and unwanted. */
- if (flushcount) {
- log_write_header(sdp, 0, 0);
- } else {
- unsigned int tail;
- tail = current_tail(sdp);
+static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd;
+ struct buffer_head *bh;
- gfs2_ail1_empty(sdp, 0);
- if (sdp->sd_log_tail != tail)
- log_pull_tail(sdp, tail);
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_ordered)) {
+ bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
+ bh = bd->bd_bh;
+ if (buffer_locked(bh)) {
+ get_bh(bh);
+ gfs2_log_unlock(sdp);
+ wait_on_buffer(bh);
+ brelse(bh);
+ gfs2_log_lock(sdp);
+ continue;
+ }
+ list_del_init(&bd->bd_le.le_list);
}
+ gfs2_log_unlock(sdp);
}
/**
@@ -640,10 +714,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
INIT_LIST_HEAD(&ai->ai_ail1_list);
INIT_LIST_HEAD(&ai->ai_ail2_list);
- gfs2_assert_withdraw(sdp,
- sdp->sd_log_num_buf + sdp->sd_log_num_jdata ==
- sdp->sd_log_commited_buf +
- sdp->sd_log_commited_databuf);
+ if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
+ printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
+ sdp->sd_log_commited_buf);
+ gfs2_assert_withdraw(sdp, 0);
+ }
+ if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
+ printk(KERN_INFO "GFS2: log databuf %u %u\n",
+ sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
+ gfs2_assert_withdraw(sdp, 0);
+ }
gfs2_assert_withdraw(sdp,
sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
@@ -651,8 +731,11 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
sdp->sd_log_flush_wrapped = 0;
ai->ai_first = sdp->sd_log_flush_head;
+ gfs2_ordered_write(sdp);
lops_before_commit(sdp);
- if (!list_empty(&sdp->sd_log_flush_list))
+ gfs2_ordered_wait(sdp);
+
+ if (sdp->sd_log_head != sdp->sd_log_flush_head)
log_flush_commit(sdp);
else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
gfs2_log_lock(sdp);
@@ -744,7 +827,6 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
- gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 8e7aa0f..dae2824 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -52,12 +52,14 @@ int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
struct buffer_head *real);
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+void gfs2_remove_from_ail(struct address_space *mapping, struct gfs2_bufdata *bd);
void gfs2_log_shutdown(struct gfs2_sbd *sdp);
void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index aff70f0..6c27cea 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -27,7 +27,104 @@
#include "trans.h"
#include "util.h"
-static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+/**
+ * gfs2_pin - Pin a buffer in memory
+ * @sdp: The superblock
+ * @bh: The buffer to be pinned
+ *
+ * The log lock must be held when calling this function
+ */
+static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
+{
+ struct gfs2_bufdata *bd;
+
+ gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
+
+ clear_buffer_dirty(bh);
+ if (test_set_buffer_pinned(bh))
+ gfs2_assert_withdraw(sdp, 0);
+ if (!buffer_uptodate(bh))
+ gfs2_io_error_bh(sdp, bh);
+ bd = bh->b_private;
+ /* If this buffer is in the AIL and it has already been written
+ * to in-place disk block, remove it from the AIL.
+ */
+ if (bd->bd_ail)
+ list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
+ get_bh(bh);
+}
+
+/**
+ * gfs2_unpin - Unpin a buffer
+ * @sdp: the filesystem the buffer belongs to
+ * @bh: The buffer to unpin
+ * @ai: The AIL to add the unpinned buffer to
+ *
+ */
+
+static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ struct gfs2_ail *ai)
+{
+ struct gfs2_bufdata *bd = bh->b_private;
+
+ gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
+
+ if (!buffer_pinned(bh))
+ gfs2_assert_withdraw(sdp, 0);
+
+ lock_buffer(bh);
+ mark_buffer_dirty(bh);
+ clear_buffer_pinned(bh);
+
+ gfs2_log_lock(sdp);
+ if (bd->bd_ail) {
+ list_del(&bd->bd_ail_st_list);
+ brelse(bh);
+ } else {
+ struct gfs2_glock *gl = bd->bd_gl;
+ list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
+ atomic_inc(&gl->gl_ail_count);
+ }
+ bd->bd_ail = ai;
+ list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bh);
+}
+
+
+static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
+{
+ return (struct gfs2_log_descriptor *)bh->b_data;
+}
+
+static inline __be64 *bh_log_ptr(struct buffer_head *bh)
+{
+ struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+ return (__force __be64 *)(ld + 1);
+}
+
+static inline __be64 *bh_ptr_end(struct buffer_head *bh)
+{
+ return (__force __be64 *)(bh->b_data + bh->b_size);
+}
+
+
+static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+{
+ struct buffer_head *bh = gfs2_log_get_buf(sdp);
+ struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+ ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+ ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
+ ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
+ ld->ld_type = cpu_to_be32(ld_type);
+ ld->ld_length = 0;
+ ld->ld_data1 = 0;
+ ld->ld_data2 = 0;
+ memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
+ return bh;
+}
+
+static void __glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
struct gfs2_glock *gl;
struct gfs2_trans *tr = current->journal_info;
@@ -38,15 +135,19 @@ static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
return;
- gfs2_log_lock(sdp);
- if (!list_empty(&le->le_list)){
- gfs2_log_unlock(sdp);
+ if (!list_empty(&le->le_list))
return;
- }
+
gfs2_glock_hold(gl);
set_bit(GLF_DIRTY, &gl->gl_flags);
sdp->sd_log_num_gl++;
list_add(&le->le_list, &sdp->sd_log_le_gl);
+}
+
+static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+{
+ gfs2_log_lock(sdp);
+ __glock_lo_add(sdp, le);
gfs2_log_unlock(sdp);
}
@@ -71,30 +172,25 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
struct gfs2_trans *tr;
+ lock_buffer(bd->bd_bh);
gfs2_log_lock(sdp);
- if (!list_empty(&bd->bd_list_tr)) {
- gfs2_log_unlock(sdp);
- return;
- }
+ if (!list_empty(&bd->bd_list_tr))
+ goto out;
tr = current->journal_info;
tr->tr_touched = 1;
tr->tr_num_buf++;
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
- gfs2_log_unlock(sdp);
-
if (!list_empty(&le->le_list))
- return;
-
- gfs2_trans_add_gl(bd->bd_gl);
-
+ goto out;
+ __glock_lo_add(sdp, &bd->bd_gl->gl_le);
gfs2_meta_check(sdp, bd->bd_bh);
gfs2_pin(sdp, bd->bd_bh);
- gfs2_log_lock(sdp);
sdp->sd_log_num_buf++;
list_add(&le->le_list, &sdp->sd_log_le_buf);
- gfs2_log_unlock(sdp);
-
tr->tr_num_buf_new++;
+out:
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bd->bd_bh);
}
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -117,8 +213,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
struct buffer_head *bh;
struct gfs2_log_descriptor *ld;
struct gfs2_bufdata *bd1 = NULL, *bd2;
- unsigned int total = sdp->sd_log_num_buf;
- unsigned int offset = BUF_OFFSET;
+ unsigned int total;
unsigned int limit;
unsigned int num;
unsigned n;
@@ -127,22 +222,20 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
limit = buf_limit(sdp);
/* for 4k blocks, limit = 503 */
+ gfs2_log_lock(sdp);
+ total = sdp->sd_log_num_buf;
bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
while(total) {
num = total;
if (total > limit)
num = limit;
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)bh->b_data;
- ptr = (__be64 *)(bh->b_data + offset);
- ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
+ gfs2_log_unlock(sdp);
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+ gfs2_log_lock(sdp);
+ ld = bh_log_desc(bh);
+ ptr = bh_log_ptr(bh);
ld->ld_length = cpu_to_be32(num + 1);
ld->ld_data1 = cpu_to_be32(num);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
n = 0;
list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
@@ -152,21 +245,27 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
break;
}
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ gfs2_log_unlock(sdp);
+ submit_bh(WRITE, bh);
+ gfs2_log_lock(sdp);
n = 0;
list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
bd_le.le_list) {
+ get_bh(bd2->bd_bh);
+ gfs2_log_unlock(sdp);
+ lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
+ gfs2_log_lock(sdp);
if (++n >= num)
break;
}
+ BUG_ON(total < num);
total -= num;
}
+ gfs2_log_unlock(sdp);
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
@@ -270,11 +369,8 @@ static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
tr = current->journal_info;
tr->tr_touched = 1;
tr->tr_num_revoke++;
-
- gfs2_log_lock(sdp);
sdp->sd_log_num_revoke++;
list_add(&le->le_list, &sdp->sd_log_le_revoke);
- gfs2_log_unlock(sdp);
}
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
@@ -284,32 +380,25 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
struct buffer_head *bh;
unsigned int offset;
struct list_head *head = &sdp->sd_log_le_revoke;
- struct gfs2_revoke *rv;
+ struct gfs2_bufdata *bd;
if (!sdp->sd_log_num_revoke)
return;
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)bh->b_data;
- ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
+ ld = bh_log_desc(bh);
ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
sizeof(u64)));
ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
offset = sizeof(struct gfs2_log_descriptor);
while (!list_empty(head)) {
- rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
- list_del_init(&rv->rv_le.le_list);
+ bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
+ list_del_init(&bd->bd_le.le_list);
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data;
@@ -319,15 +408,14 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
offset = sizeof(struct gfs2_meta_header);
}
- *(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
- kfree(rv);
+ *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
offset += sizeof(u64);
}
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
+ submit_bh(WRITE, bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -466,220 +554,136 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
struct address_space *mapping = bd->bd_bh->b_page->mapping;
struct gfs2_inode *ip = GFS2_I(mapping->host);
+ lock_buffer(bd->bd_bh);
gfs2_log_lock(sdp);
- if (!list_empty(&bd->bd_list_tr)) {
- gfs2_log_unlock(sdp);
- return;
- }
+ if (!list_empty(&bd->bd_list_tr))
+ goto out;
tr->tr_touched = 1;
if (gfs2_is_jdata(ip)) {
tr->tr_num_buf++;
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
}
- gfs2_log_unlock(sdp);
if (!list_empty(&le->le_list))
- return;
+ goto out;
- gfs2_trans_add_gl(bd->bd_gl);
+ __glock_lo_add(sdp, &bd->bd_gl->gl_le);
if (gfs2_is_jdata(ip)) {
- sdp->sd_log_num_jdata++;
gfs2_pin(sdp, bd->bd_bh);
tr->tr_num_databuf_new++;
+ sdp->sd_log_num_databuf++;
+ list_add(&le->le_list, &sdp->sd_log_le_databuf);
+ } else {
+ list_add(&le->le_list, &sdp->sd_log_le_ordered);
}
- sdp->sd_log_num_databuf++;
- gfs2_log_lock(sdp);
- list_add(&le->le_list, &sdp->sd_log_le_databuf);
+out:
gfs2_log_unlock(sdp);
+ unlock_buffer(bd->bd_bh);
}
-static int gfs2_check_magic(struct buffer_head *bh)
+static void gfs2_check_magic(struct buffer_head *bh)
{
- struct page *page = bh->b_page;
void *kaddr;
__be32 *ptr;
- int rv = 0;
- kaddr = kmap_atomic(page, KM_USER0);
+ clear_buffer_escaped(bh);
+ kaddr = kmap_atomic(bh->b_page, KM_USER0);
ptr = kaddr + bh_offset(bh);
if (*ptr == cpu_to_be32(GFS2_MAGIC))
- rv = 1;
+ set_buffer_escaped(bh);
kunmap_atomic(kaddr, KM_USER0);
-
- return rv;
}
-/**
- * databuf_lo_before_commit - Scan the data buffers, writing as we go
- *
- * Here we scan through the lists of buffers and make the assumption
- * that any buffer thats been pinned is being journaled, and that
- * any unpinned buffer is an ordered write data buffer and therefore
- * will be written back rather than journaled.
- */
-static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ struct list_head *list, struct list_head *done,
+ unsigned int n)
{
- LIST_HEAD(started);
- struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
- struct buffer_head *bh = NULL,*bh1 = NULL;
+ struct buffer_head *bh1;
struct gfs2_log_descriptor *ld;
- unsigned int limit;
- unsigned int total_dbuf = sdp->sd_log_num_databuf;
- unsigned int total_jdata = sdp->sd_log_num_jdata;
- unsigned int num, n;
- __be64 *ptr = NULL;
+ struct gfs2_bufdata *bd;
+ __be64 *ptr;
+
+ if (!bh)
+ return;
- limit = databuf_limit(sdp);
+ ld = bh_log_desc(bh);
+ ld->ld_length = cpu_to_be32(n + 1);
+ ld->ld_data1 = cpu_to_be32(n);
- /*
- * Start writing ordered buffers, write journaled buffers
- * into the log along with a header
- */
+ ptr = bh_log_ptr(bh);
+
+ get_bh(bh);
+ submit_bh(WRITE, bh);
gfs2_log_lock(sdp);
- bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
- bd_le.le_list);
- while(total_dbuf) {
- num = total_jdata;
- if (num > limit)
- num = limit;
- n = 0;
- list_for_each_entry_safe_continue(bd1, bdt,
- &sdp->sd_log_le_databuf,
- bd_le.le_list) {
- /* store off the buffer head in a local ptr since
- * gfs2_bufdata might change when we drop the log lock
- */
- bh1 = bd1->bd_bh;
-
- /* An ordered write buffer */
- if (bh1 && !buffer_pinned(bh1)) {
- list_move(&bd1->bd_le.le_list, &started);
- if (bd1 == bd2) {
- bd2 = NULL;
- bd2 = list_prepare_entry(bd2,
- &sdp->sd_log_le_databuf,
- bd_le.le_list);
- }
- total_dbuf--;
- if (bh1) {
- if (buffer_dirty(bh1)) {
- get_bh(bh1);
-
- gfs2_log_unlock(sdp);
-
- ll_rw_block(SWRITE, 1, &bh1);
- brelse(bh1);
-
- gfs2_log_lock(sdp);
- }
- continue;
- }
- continue;
- } else if (bh1) { /* A journaled buffer */
- int magic;
- gfs2_log_unlock(sdp);
- if (!bh) {
- bh = gfs2_log_get_buf(sdp);
- ld = (struct gfs2_log_descriptor *)
- bh->b_data;
- ptr = (__be64 *)(bh->b_data +
- DATABUF_OFFSET);
- ld->ld_header.mh_magic =
- cpu_to_be32(GFS2_MAGIC);
- ld->ld_header.mh_type =
- cpu_to_be32(GFS2_METATYPE_LD);
- ld->ld_header.mh_format =
- cpu_to_be32(GFS2_FORMAT_LD);
- ld->ld_type =
- cpu_to_be32(GFS2_LOG_DESC_JDATA);
- ld->ld_length = cpu_to_be32(num + 1);
- ld->ld_data1 = cpu_to_be32(num);
- ld->ld_data2 = cpu_to_be32(0);
- memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
- }
- magic = gfs2_check_magic(bh1);
- *ptr++ = cpu_to_be64(bh1->b_blocknr);
- *ptr++ = cpu_to_be64((__u64)magic);
- clear_buffer_escaped(bh1);
- if (unlikely(magic != 0))
- set_buffer_escaped(bh1);
- gfs2_log_lock(sdp);
- if (++n >= num)
- break;
- } else if (!bh1) {
- total_dbuf--;
- sdp->sd_log_num_databuf--;
- list_del_init(&bd1->bd_le.le_list);
- if (bd1 == bd2) {
- bd2 = NULL;
- bd2 = list_prepare_entry(bd2,
- &sdp->sd_log_le_databuf,
- bd_le.le_list);
- }
- kmem_cache_free(gfs2_bufdata_cachep, bd1);
- }
+ while(!list_empty(list)) {
+ bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
+ list_move_tail(&bd->bd_le.le_list, done);
+ get_bh(bd->bd_bh);
+ while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
+ gfs2_log_incr_head(sdp);
+ ptr += 2;
}
gfs2_log_unlock(sdp);
- if (bh) {
- set_buffer_mapped(bh);
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- bh = NULL;
+ lock_buffer(bd->bd_bh);
+ if (buffer_escaped(bd->bd_bh)) {
+ void *kaddr;
+ bh1 = gfs2_log_get_buf(sdp);
+ kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+ memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
+ bh1->b_size);
+ kunmap_atomic(kaddr, KM_USER0);
+ *(__be32 *)bh1->b_data = 0;
+ clear_buffer_escaped(bd->bd_bh);
+ unlock_buffer(bd->bd_bh);
+ brelse(bd->bd_bh);
+ } else {
+ bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
}
- n = 0;
+ submit_bh(WRITE, bh1);
gfs2_log_lock(sdp);
- list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
- bd_le.le_list) {
- if (!bd2->bd_bh)
- continue;
- /* copy buffer if it needs escaping */
- gfs2_log_unlock(sdp);
- if (unlikely(buffer_escaped(bd2->bd_bh))) {
- void *kaddr;
- struct page *page = bd2->bd_bh->b_page;
- bh = gfs2_log_get_buf(sdp);
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(bh->b_data,
- kaddr + bh_offset(bd2->bd_bh),
- sdp->sd_sb.sb_bsize);
- kunmap_atomic(kaddr, KM_USER0);
- *(__be32 *)bh->b_data = 0;
- } else {
- bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- }
- set_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- gfs2_log_lock(sdp);
- if (++n >= num)
- break;
- }
- bh = NULL;
- total_dbuf -= num;
- total_jdata -= num;
+ ptr += 2;
}
gfs2_log_unlock(sdp);
+ brelse(bh);
+}
- /* Wait on all ordered buffers */
- while (!list_empty(&started)) {
- gfs2_log_lock(sdp);
- bd1 = list_entry(started.next, struct gfs2_bufdata,
- bd_le.le_list);
- list_del_init(&bd1->bd_le.le_list);
- sdp->sd_log_num_databuf--;
- bh = bd1->bd_bh;
- if (bh) {
- bh->b_private = NULL;
- get_bh(bh);
- gfs2_log_unlock(sdp);
- wait_on_buffer(bh);
- brelse(bh);
- } else
- gfs2_log_unlock(sdp);
+/**
+ * databuf_lo_before_commit - Scan the data buffers, writing as we go
+ *
+ */
- kmem_cache_free(gfs2_bufdata_cachep, bd1);
- }
+static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
+{
+ struct gfs2_bufdata *bd = NULL;
+ struct buffer_head *bh = NULL;
+ unsigned int n = 0;
+ __be64 *ptr = NULL, *end = NULL;
+ LIST_HEAD(processed);
+ LIST_HEAD(in_progress);
- /* We've removed all the ordered write bufs here, so only jdata left */
- gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
+ gfs2_log_lock(sdp);
+ while (!list_empty(&sdp->sd_log_le_databuf)) {
+ if (ptr == end) {
+ gfs2_log_unlock(sdp);
+ gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+ n = 0;
+ bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
+ ptr = bh_log_ptr(bh);
+ end = bh_ptr_end(bh) - 1;
+ gfs2_log_lock(sdp);
+ continue;
+ }
+ bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
+ list_move_tail(&bd->bd_le.le_list, &in_progress);
+ gfs2_check_magic(bd->bd_bh);
+ *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
+ *ptr++ = cpu_to_be64(buffer_escaped(bh) ? 1 : 0);
+ n++;
+ }
+ gfs2_log_unlock(sdp);
+ gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
+ gfs2_log_lock(sdp);
+ list_splice(&processed, &sdp->sd_log_le_databuf);
+ gfs2_log_unlock(sdp);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
@@ -763,11 +767,9 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
list_del_init(&bd->bd_le.le_list);
sdp->sd_log_num_databuf--;
- sdp->sd_log_num_jdata--;
gfs2_unpin(sdp, bd->bd_bh, ai);
}
gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
- gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}
@@ -815,10 +817,10 @@ const struct gfs2_log_operations gfs2_databuf_lops = {
const struct gfs2_log_operations *gfs2_log_ops[] = {
&gfs2_glock_lops,
+ &gfs2_databuf_lops,
&gfs2_buf_lops,
- &gfs2_revoke_lops,
&gfs2_rg_lops,
- &gfs2_databuf_lops,
+ &gfs2_revoke_lops,
NULL,
};
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 787a0ed..79c91fd 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -72,7 +72,7 @@ static int __init init_gfs2_fs(void)
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0,
- gfs2_init_glock_once, NULL);
+ gfs2_init_glock_once);
if (!gfs2_glock_cachep)
goto fail;
@@ -80,13 +80,13 @@ static int __init init_gfs2_fs(void)
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD,
- gfs2_init_inode_once, NULL);
+ gfs2_init_inode_once);
if (!gfs2_inode_cachep)
goto fail;
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
sizeof(struct gfs2_bufdata),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!gfs2_bufdata_cachep)
goto fail;
@@ -107,6 +107,8 @@ static int __init init_gfs2_fs(void)
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
+ gfs2_glock_exit();
+
if (gfs2_bufdata_cachep)
kmem_cache_destroy(gfs2_bufdata_cachep);
@@ -127,6 +129,7 @@ fail:
static void __exit exit_gfs2_fs(void)
{
+ gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 8da343b..4da4239 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -297,74 +297,35 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
unlock_page(bh->b_page);
}
-/**
- * gfs2_pin - Pin a buffer in memory
- * @sdp: the filesystem the buffer belongs to
- * @bh: The buffer to be pinned
- *
- */
-
-void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
+ struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
struct gfs2_bufdata *bd = bh->b_private;
-
- gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
-
- if (test_set_buffer_pinned(bh))
- gfs2_assert_withdraw(sdp, 0);
-
- wait_on_buffer(bh);
-
- /* If this buffer is in the AIL and it has already been written
- to in-place disk block, remove it from the AIL. */
-
- gfs2_log_lock(sdp);
- if (bd->bd_ail && !buffer_in_io(bh))
- list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
- gfs2_log_unlock(sdp);
-
- clear_buffer_dirty(bh);
- wait_on_buffer(bh);
-
- if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
-
- get_bh(bh);
-}
-
-/**
- * gfs2_unpin - Unpin a buffer
- * @sdp: the filesystem the buffer belongs to
- * @bh: The buffer to unpin
- * @ai:
- *
- */
-
-void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
- struct gfs2_ail *ai)
-{
- struct gfs2_bufdata *bd = bh->b_private;
-
- gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
-
- if (!buffer_pinned(bh))
- gfs2_assert_withdraw(sdp, 0);
-
- mark_buffer_dirty(bh);
- clear_buffer_pinned(bh);
-
- gfs2_log_lock(sdp);
- if (bd->bd_ail) {
- list_del(&bd->bd_ail_st_list);
+ if (test_clear_buffer_pinned(bh)) {
+ list_del_init(&bd->bd_le.le_list);
+ if (meta) {
+ gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
+ sdp->sd_log_num_buf--;
+ tr->tr_num_buf_rm++;
+ } else {
+ gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
+ sdp->sd_log_num_databuf--;
+ tr->tr_num_databuf_rm++;
+ }
+ tr->tr_touched = 1;
brelse(bh);
- } else {
- struct gfs2_glock *gl = bd->bd_gl;
- list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
- atomic_inc(&gl->gl_ail_count);
}
- bd->bd_ail = ai;
- list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
- gfs2_log_unlock(sdp);
+ if (bd) {
+ if (bd->bd_ail) {
+ gfs2_remove_from_ail(NULL, bd);
+ bh->b_private = NULL;
+ bd->bd_bh = NULL;
+ bd->bd_blkno = bh->b_blocknr;
+ gfs2_trans_add_revoke(sdp, bd);
+ }
+ }
+ clear_buffer_dirty(bh);
+ clear_buffer_uptodate(bh);
}
/**
@@ -383,44 +344,11 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
while (blen) {
bh = getbuf(ip->i_gl, bstart, NO_CREATE);
if (bh) {
- struct gfs2_bufdata *bd = bh->b_private;
-
- if (test_clear_buffer_pinned(bh)) {
- struct gfs2_trans *tr = current->journal_info;
- struct gfs2_inode *bh_ip =
- GFS2_I(bh->b_page->mapping->host);
-
- gfs2_log_lock(sdp);
- list_del_init(&bd->bd_le.le_list);
- gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
- sdp->sd_log_num_buf--;
- gfs2_log_unlock(sdp);
- if (bh_ip->i_inode.i_private != NULL)
- tr->tr_num_databuf_rm++;
- else
- tr->tr_num_buf_rm++;
- brelse(bh);
- }
- if (bd) {
- gfs2_log_lock(sdp);
- if (bd->bd_ail) {
- u64 blkno = bh->b_blocknr;
- bd->bd_ail = NULL;
- list_del(&bd->bd_ail_st_list);
- list_del(&bd->bd_ail_gl_list);
- atomic_dec(&bd->bd_gl->gl_ail_count);
- brelse(bh);
- gfs2_log_unlock(sdp);
- gfs2_trans_add_revoke(sdp, blkno);
- } else
- gfs2_log_unlock(sdp);
- }
-
lock_buffer(bh);
- clear_buffer_dirty(bh);
- clear_buffer_uptodate(bh);
+ gfs2_log_lock(sdp);
+ gfs2_remove_from_journal(bh, current->journal_info, 1);
+ gfs2_log_unlock(sdp);
unlock_buffer(bh);
-
brelse(bh);
}
@@ -446,10 +374,10 @@ void gfs2_meta_cache_flush(struct gfs2_inode *ip)
for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
bh_slot = &ip->i_cache[x];
- if (!*bh_slot)
- break;
- brelse(*bh_slot);
- *bh_slot = NULL;
+ if (*bh_slot) {
+ brelse(*bh_slot);
+ *bh_slot = NULL;
+ }
}
spin_unlock(&ip->i_spin);
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 527bf19..b704822 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -50,9 +50,9 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
int meta);
-void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
- struct gfs2_ail *ai);
+
+void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
+ int meta);
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c
index 6f006a80..b941f9f 100644
--- a/fs/gfs2/mount.c
+++ b/fs/gfs2/mount.c
@@ -42,6 +42,7 @@ enum {
Opt_nosuiddir,
Opt_data_writeback,
Opt_data_ordered,
+ Opt_err,
};
static match_table_t tokens = {
@@ -64,7 +65,8 @@ static match_table_t tokens = {
{Opt_suiddir, "suiddir"},
{Opt_nosuiddir, "nosuiddir"},
{Opt_data_writeback, "data=writeback"},
- {Opt_data_ordered, "data=ordered"}
+ {Opt_data_ordered, "data=ordered"},
+ {Opt_err, NULL}
};
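
Note: the new {Opt_err, NULL} entry terminates the token table, so an unrecognised mount option falls through to Opt_err instead of the lookup walking past the end of the array. A rough user-space analogue of that sentinel-terminated lookup; demo_match and demo_tokens are invented names, not the real match_token() machinery.

/* Sketch only: demo_match and demo_tokens are not the real match_token(). */
#include <stdio.h>
#include <string.h>

enum { Opt_quota, Opt_noquota, Opt_err };

struct demo_token {
	int token;
	const char *pattern;
};

static const struct demo_token demo_tokens[] = {
	{Opt_quota,   "quota"},
	{Opt_noquota, "noquota"},
	{Opt_err,     NULL}		/* sentinel: must stay last */
};

static int demo_match(const char *opt)
{
	const struct demo_token *t;

	for (t = demo_tokens; t->pattern; t++)
		if (!strcmp(t->pattern, opt))
			return t->token;
	return Opt_err;			/* fell through to the sentinel */
}

int main(void)
{
	printf("%d %d\n", demo_match("noquota"), demo_match("bogus"));
	return 0;
}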
/**
@@ -82,19 +84,20 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
char *options, *o, *v;
int error = 0;
- /* If someone preloaded options, use those instead */
- spin_lock(&gfs2_sys_margs_lock);
- if (!remount && gfs2_sys_margs) {
- data = gfs2_sys_margs;
- gfs2_sys_margs = NULL;
- }
- spin_unlock(&gfs2_sys_margs_lock);
+ if (!remount) {
+ /* If someone preloaded options, use those instead */
+ spin_lock(&gfs2_sys_margs_lock);
+ if (gfs2_sys_margs) {
+ data = gfs2_sys_margs;
+ gfs2_sys_margs = NULL;
+ }
+ spin_unlock(&gfs2_sys_margs_lock);
- /* Set some defaults */
- memset(args, 0, sizeof(struct gfs2_args));
- args->ar_num_glockd = GFS2_GLOCKD_DEFAULT;
- args->ar_quota = GFS2_QUOTA_DEFAULT;
- args->ar_data = GFS2_DATA_DEFAULT;
+ /* Set some defaults */
+ args->ar_num_glockd = GFS2_GLOCKD_DEFAULT;
+ args->ar_quota = GFS2_QUOTA_DEFAULT;
+ args->ar_data = GFS2_DATA_DEFAULT;
+ }
/* Split the options into tokens with the "," character and
process them */
@@ -236,6 +239,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
case Opt_data_ordered:
args->ar_data = GFS2_DATA_ORDERED;
break;
+ case Opt_err:
default:
fs_info(sdp, "unknown option: %s\n", o);
error = -EINVAL;
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 26c8888..873a511 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -90,7 +90,7 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
error = gfs2_block_map(inode, lblock, 0, bh_result);
if (error)
return error;
- if (bh_result->b_blocknr == 0)
+ if (!buffer_mapped(bh_result))
return -EIO;
return 0;
}
@@ -251,7 +251,7 @@ static int gfs2_readpage(struct file *file, struct page *page)
if (file) {
gf = file->private_data;
if (test_bit(GFF_EXLOCK, &gf->f_flags))
- /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
+ /* gfs2_sharewrite_fault has grabbed the ip->i_gl already */
goto skip_lock;
}
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
@@ -414,9 +414,10 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
if (ind_blocks || data_blocks)
rblocks += RES_STATFS + RES_QUOTA;
- error = gfs2_trans_begin(sdp, rblocks, 0);
+ error = gfs2_trans_begin(sdp, rblocks,
+ PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
if (error)
- goto out;
+ goto out_trans_fail;
if (gfs2_is_stuffed(ip)) {
if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
@@ -434,6 +435,7 @@ prepare_write:
out:
if (error) {
gfs2_trans_end(sdp);
+out_trans_fail:
if (alloc_required) {
gfs2_inplace_release(ip);
out_qunlock:
@@ -615,58 +617,50 @@ static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
return dblock;
}
-static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
+static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
struct gfs2_bufdata *bd;
+ lock_buffer(bh);
gfs2_log_lock(sdp);
+ clear_buffer_dirty(bh);
bd = bh->b_private;
if (bd) {
- bd->bd_bh = NULL;
- bh->b_private = NULL;
- if (!bd->bd_ail && list_empty(&bd->bd_le.le_list))
- kmem_cache_free(gfs2_bufdata_cachep, bd);
+ if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
+ list_del_init(&bd->bd_le.le_list);
+ else
+ gfs2_remove_from_journal(bh, current->journal_info, 0);
}
- gfs2_log_unlock(sdp);
-
- lock_buffer(bh);
- clear_buffer_dirty(bh);
bh->b_bdev = NULL;
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
- clear_buffer_delay(bh);
+ gfs2_log_unlock(sdp);
unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
- struct buffer_head *head, *bh, *next;
- unsigned int curr_off = 0;
+ struct buffer_head *bh, *head;
+ unsigned long pos = 0;
BUG_ON(!PageLocked(page));
if (offset == 0)
ClearPageChecked(page);
if (!page_has_buffers(page))
- return;
+ goto out;
bh = head = page_buffers(page);
do {
- unsigned int next_off = curr_off + bh->b_size;
- next = bh->b_this_page;
-
- if (offset <= curr_off)
- discard_buffer(sdp, bh);
-
- curr_off = next_off;
- bh = next;
+ if (offset <= pos)
+ gfs2_discard(sdp, bh);
+ pos += bh->b_size;
+ bh = bh->b_this_page;
} while (bh != head);
-
- if (!offset)
+out:
+ if (offset == 0)
try_to_release_page(page, 0);
-
- return;
}
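
Note: the simplified gfs2_invalidatepage() above walks the page's buffer ring once, tracking the byte position of each buffer and discarding every buffer that starts at or beyond the invalidation offset. A small user-space sketch of that position walk, with made-up page and buffer sizes.

/* Sketch only: sizes and names are made up, not taken from GFS2. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096
#define DEMO_BUF_SIZE  1024	/* four buffers per page in this example */

int main(void)
{
	unsigned long offset = 2500;	/* invalidate from this byte onwards */
	unsigned long pos = 0;

	while (pos < DEMO_PAGE_SIZE) {
		if (offset <= pos)
			printf("buffer at %lu: discard\n", pos);
		else
			printf("buffer at %lu: keep\n", pos);
		pos += DEMO_BUF_SIZE;
	}
	/* offset 2500 keeps the buffers at 0, 1024 and 2048, discards 3072 */
	return 0;
}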
/**
@@ -735,59 +729,6 @@ out:
}
/**
- * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
- * @bh: the buffer we're stuck on
- *
- */
-
-static void stuck_releasepage(struct buffer_head *bh)
-{
- struct inode *inode = bh->b_page->mapping->host;
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
- struct gfs2_bufdata *bd = bh->b_private;
- struct gfs2_glock *gl;
-static unsigned limit = 0;
-
- if (limit > 3)
- return;
- limit++;
-
- fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
- fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
- (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
- fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
- fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");
-
- if (!bd)
- return;
-
- gl = bd->bd_gl;
-
- fs_warn(sdp, "gl = (%u, %llu)\n",
- gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);
-
- fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
- (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
- (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");
-
- if (gl->gl_ops == &gfs2_inode_glops) {
- struct gfs2_inode *ip = gl->gl_object;
- unsigned int x;
-
- if (!ip)
- return;
-
- fs_warn(sdp, "ip = %llu %llu\n",
- (unsigned long long)ip->i_no_formal_ino,
- (unsigned long long)ip->i_no_addr);
-
- for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
- fs_warn(sdp, "ip->i_cache[%u] = %s\n",
- x, (ip->i_cache[x]) ? "!NULL" : "NULL");
- }
-}
-
-/**
* gfs2_releasepage - free the metadata associated with a page
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
@@ -804,41 +745,39 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
- unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;
if (!page_has_buffers(page))
- goto out;
+ return 0;
+ gfs2_log_lock(sdp);
head = bh = page_buffers(page);
do {
- while (atomic_read(&bh->b_count)) {
- if (!atomic_read(&aspace->i_writecount))
- return 0;
-
- if (!(gfp_mask & __GFP_WAIT))
- return 0;
-
- if (time_after_eq(jiffies, t)) {
- stuck_releasepage(bh);
- /* should we withdraw here? */
- return 0;
- }
-
- yield();
- }
-
+ if (atomic_read(&bh->b_count))
+ goto cannot_release;
+ bd = bh->b_private;
+ if (bd && bd->bd_ail)
+ goto cannot_release;
gfs2_assert_warn(sdp, !buffer_pinned(bh));
gfs2_assert_warn(sdp, !buffer_dirty(bh));
+ bh = bh->b_this_page;
+ } while(bh != head);
+ gfs2_log_unlock(sdp);
+ head = bh = page_buffers(page);
+ do {
gfs2_log_lock(sdp);
bd = bh->b_private;
if (bd) {
gfs2_assert_warn(sdp, bd->bd_bh == bh);
gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
- gfs2_assert_warn(sdp, !bd->bd_ail);
- bd->bd_bh = NULL;
- if (!list_empty(&bd->bd_le.le_list))
- bd = NULL;
+ if (!list_empty(&bd->bd_le.le_list)) {
+ if (!buffer_pinned(bh))
+ list_del_init(&bd->bd_le.le_list);
+ else
+ bd = NULL;
+ }
+ if (bd)
+ bd->bd_bh = NULL;
bh->b_private = NULL;
}
gfs2_log_unlock(sdp);
@@ -848,8 +787,10 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bh = bh->b_this_page;
} while (bh != head);
-out:
return try_to_free_buffers(page);
+cannot_release:
+ gfs2_log_unlock(sdp);
+ return 0;
}
const struct address_space_operations gfs2_file_aops = {
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b8312ed..e2d1347 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -237,7 +237,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
inode = gfs2_inode_lookup(sb, DT_UNKNOWN,
inum->no_addr,
- 0);
+ 0, 0);
if (!inode)
goto fail;
if (IS_ERR(inode)) {
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 196d832..46a9e10 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -177,8 +177,8 @@ static const u32 fsflags_to_gfs2[32] = {
[5] = GFS2_DIF_APPENDONLY,
[7] = GFS2_DIF_NOATIME,
[12] = GFS2_DIF_EXHASH,
- [14] = GFS2_DIF_JDATA,
- [20] = GFS2_DIF_DIRECTIO,
+ [14] = GFS2_DIF_INHERIT_JDATA,
+ [20] = GFS2_DIF_INHERIT_DIRECTIO,
};
static const u32 gfs2_to_fsflags[32] = {
@@ -187,8 +187,6 @@ static const u32 gfs2_to_fsflags[32] = {
[gfs2fl_AppendOnly] = FS_APPEND_FL,
[gfs2fl_NoAtime] = FS_NOATIME_FL,
[gfs2fl_ExHash] = FS_INDEX_FL,
- [gfs2fl_Jdata] = FS_JOURNAL_DATA_FL,
- [gfs2fl_Directio] = FS_DIRECTIO_FL,
[gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
@@ -207,6 +205,12 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
return error;
fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
+ if (!S_ISDIR(inode->i_mode)) {
+ if (ip->i_di.di_flags & GFS2_DIF_JDATA)
+ fsflags |= FS_JOURNAL_DATA_FL;
+ if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
+ fsflags |= FS_DIRECTIO_FL;
+ }
if (put_user(fsflags, ptr))
error = -EFAULT;
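
Note: both ioctl paths above funnel through fsflags_cvt(), which is not shown in this hunk; a plausible shape for that helper is a per-bit table lookup over the 32 input bits, as sketched below in user space. The table contents here are placeholders, not the real GFS2_DIF_ or FS_..._FL values.

/* Sketch only: the table values are placeholders, not real flag constants. */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_flags_cvt(const uint32_t table[32], uint32_t val)
{
	uint32_t res = 0;
	int i;

	for (i = 0; i < 32; i++) {	/* OR in the mapping for every set bit */
		if (val & 1)
			res |= table[i];
		val >>= 1;
	}
	return res;
}

int main(void)
{
	uint32_t table[32] = { [1] = 0x10, [3] = 0x40 };	/* hypothetical mapping */

	printf("0x%x\n", demo_flags_cvt(table, (1u << 1) | (1u << 3)));	/* prints 0x50 */
	return 0;
}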
@@ -270,13 +274,6 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
if ((new_flags ^ flags) == 0)
goto out;
- if (S_ISDIR(inode->i_mode)) {
- if ((new_flags ^ flags) & GFS2_DIF_JDATA)
- new_flags ^= (GFS2_DIF_JDATA|GFS2_DIF_INHERIT_JDATA);
- if ((new_flags ^ flags) & GFS2_DIF_DIRECTIO)
- new_flags ^= (GFS2_DIF_DIRECTIO|GFS2_DIF_INHERIT_DIRECTIO);
- }
-
error = -EINVAL;
if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
goto out;
@@ -315,11 +312,19 @@ out:
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
+ struct inode *inode = filp->f_path.dentry->d_inode;
u32 fsflags, gfsflags;
if (get_user(fsflags, ptr))
return -EFAULT;
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
- return do_gfs2_set_flags(filp, gfsflags, ~0);
+ if (!S_ISDIR(inode->i_mode)) {
+ if (gfsflags & GFS2_DIF_INHERIT_JDATA)
+ gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
+ if (gfsflags & GFS2_DIF_INHERIT_DIRECTIO)
+ gfsflags ^= (GFS2_DIF_DIRECTIO | GFS2_DIF_INHERIT_DIRECTIO);
+ return do_gfs2_set_flags(filp, gfsflags, ~0);
+ }
+ return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -489,6 +494,29 @@ static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
}
/**
+ * gfs2_setlease - acquire/release a file lease
+ * @file: the file pointer
+ * @arg: lease type
+ * @fl: file lock
+ *
+ * Returns: errno
+ */
+
+static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
+
+ /*
+ * We don't currently have a way to enforce a lease across the whole
+ * cluster; until we do, disable leases (by just returning -EINVAL),
+ * unless the administrator has requested purely local locking.
+ */
+ if (!sdp->sd_args.ar_localflocks)
+ return -EINVAL;
+ return generic_setlease(file, arg, fl);
+}
+
+/**
* gfs2_lock - acquire/release a posix lock on a file
* @file: the file pointer
* @cmd: either modify or retrieve lock state, possibly wait
@@ -543,7 +571,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+ flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
+ | GL_FLOCK;
mutex_lock(&fp->f_fl_mutex);
@@ -551,21 +580,19 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (gl) {
if (fl_gh->gh_state == state)
goto out;
- gfs2_glock_hold(gl);
flock_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
- gfs2_glock_dq_uninit(fl_gh);
+ gfs2_glock_dq_wait(fl_gh);
+ gfs2_holder_reinit(state, flags, fl_gh);
} else {
error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
ip->i_no_addr, &gfs2_flock_glops,
CREATE, &gl);
if (error)
goto out;
+ gfs2_holder_init(gl, state, flags, fl_gh);
+ gfs2_glock_put(gl);
}
-
- gfs2_holder_init(gl, state, flags, fl_gh);
- gfs2_glock_put(gl);
-
error = gfs2_glock_nq(fl_gh);
if (error) {
gfs2_holder_uninit(fl_gh);
@@ -638,6 +665,7 @@ const struct file_operations gfs2_file_fops = {
.flock = gfs2_flock,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
+ .setlease = gfs2_setlease,
};
const struct file_operations gfs2_dir_fops = {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cf5aa50..17de58e 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -28,18 +28,18 @@
#include "lm.h"
#include "mount.h"
#include "ops_fstype.h"
+#include "ops_dentry.h"
#include "ops_super.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"
+#include "log.h"
#define DO 0
#define UNDO 1
-extern struct dentry_operations gfs2_dops;
-
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
@@ -82,13 +82,15 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
INIT_LIST_HEAD(&sdp->sd_log_le_rg);
INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
+ INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
mutex_init(&sdp->sd_log_reserve_mutex);
INIT_LIST_HEAD(&sdp->sd_ail1_list);
INIT_LIST_HEAD(&sdp->sd_ail2_list);
init_rwsem(&sdp->sd_log_flush_lock);
- INIT_LIST_HEAD(&sdp->sd_log_flush_list);
+ atomic_set(&sdp->sd_log_in_flight, 0);
+ init_waitqueue_head(&sdp->sd_log_flush_wait);
INIT_LIST_HEAD(&sdp->sd_revoke_list);
@@ -145,7 +147,8 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);
- while ((table = strchr(sdp->sd_table_name, '/')))
+ table = sdp->sd_table_name;
+ while ((table = strchr(table, '/')))
*table = '_';
out:
@@ -161,14 +164,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
if (undo)
goto fail_trans;
- p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
- error = IS_ERR(p);
- if (error) {
- fs_err(sdp, "can't start scand thread: %d\n", error);
- return error;
- }
- sdp->sd_scand_process = p;
-
for (sdp->sd_glockd_num = 0;
sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
sdp->sd_glockd_num++) {
@@ -229,14 +224,13 @@ fail:
while (sdp->sd_glockd_num--)
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
- kthread_stop(sdp->sd_scand_process);
return error;
}
static inline struct inode *gfs2_lookup_root(struct super_block *sb,
u64 no_addr)
{
- return gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ return gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
}
static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
@@ -301,8 +295,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
fs_err(sdp, "can't get root dentry\n");
error = -ENOMEM;
iput(inode);
- }
- sb->s_root->d_op = &gfs2_dops;
+ } else
+ sb->s_root->d_op = &gfs2_dops;
+
out:
gfs2_glock_dq_uninit(&sb_gh);
return error;
@@ -368,7 +363,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
+ LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
&sdp->sd_jinode_gh);
if (error) {
fs_err(sdp, "can't acquire journal inode glock: %d\n",
@@ -818,7 +813,6 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
struct nameidata nd;
struct file_system_type *fstype;
struct super_block *sb = NULL, *s;
- struct list_head *l;
int error;
error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
@@ -830,8 +824,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
error = vfs_getattr(nd.mnt, nd.dentry, &stat);
fstype = get_fs_type("gfs2");
- list_for_each(l, &fstype->fs_supers) {
- s = list_entry(l, struct super_block, s_instances);
+ list_for_each_entry(s, &fstype->fs_supers, s_instances) {
if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
(S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
sb = s;
@@ -861,7 +854,7 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
error = -ENOENT;
goto error;
}
- sdp = (struct gfs2_sbd*) sb->s_fs_info;
+ sdp = sb->s_fs_info;
if (sdp->sd_vfs_meta) {
printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n");
error = -EBUSY;
@@ -896,7 +889,10 @@ error:
static void gfs2_kill_sb(struct super_block *sb)
{
- gfs2_delete_debugfs_file(sb->s_fs_info);
+ if (sb->s_fs_info) {
+ gfs2_delete_debugfs_file(sb->s_fs_info);
+ gfs2_meta_syncfs(sb->s_fs_info);
+ }
kill_block_super(sb);
}
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 911c115..291f0c7 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -69,7 +69,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
break;
} else if (PTR_ERR(inode) != -EEXIST ||
- (nd->intent.open.flags & O_EXCL)) {
+ (nd && (nd->intent.open.flags & O_EXCL))) {
gfs2_holder_uninit(ghs);
return PTR_ERR(inode);
}
@@ -278,17 +278,25 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
- error = gfs2_glock_nq_m(3, ghs);
+ error = gfs2_glock_nq(ghs); /* parent */
if (error)
- goto out;
+ goto out_parent;
+
+ error = gfs2_glock_nq(ghs + 1); /* child */
+ if (error)
+ goto out_child;
+
+ error = gfs2_glock_nq(ghs + 2); /* rgrp */
+ if (error)
+ goto out_rgrp;
error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
if (error)
- goto out_gunlock;
+ goto out_rgrp;
error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0);
if (error)
- goto out_gunlock;
+ goto out_rgrp;
error = gfs2_dir_del(dip, &dentry->d_name);
if (error)
@@ -298,12 +306,15 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
out_end_trans:
gfs2_trans_end(sdp);
-out_gunlock:
- gfs2_glock_dq_m(3, ghs);
-out:
- gfs2_holder_uninit(ghs);
- gfs2_holder_uninit(ghs + 1);
+ gfs2_glock_dq(ghs + 2);
+out_rgrp:
gfs2_holder_uninit(ghs + 2);
+ gfs2_glock_dq(ghs + 1);
+out_child:
+ gfs2_holder_uninit(ghs + 1);
+ gfs2_glock_dq(ghs);
+out_parent:
+ gfs2_holder_uninit(ghs);
gfs2_glock_dq_uninit(&ri_gh);
return error;
}
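
Note: the reworked gfs2_unlink() error handling above takes the parent, child and rgrp glocks one at a time and unwinds through a ladder of goto labels, so each failure path releases exactly the locks already held. A generic user-space sketch of that acquire/unwind pattern; acquire() and release() are hypothetical stand-ins for gfs2_glock_nq() and gfs2_glock_dq().

/* Sketch only: acquire/release stand in for gfs2_glock_nq/gfs2_glock_dq. */
#include <stdio.h>

static int acquire(const char *name, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", name);
		return -1;
	}
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

static int demo_unlink(int fail_at)
{
	int error;

	error = acquire("parent", fail_at == 0);
	if (error)
		goto out_parent;
	error = acquire("child", fail_at == 1);
	if (error)
		goto out_child;
	error = acquire("rgrp", fail_at == 2);
	if (error)
		goto out_rgrp;

	printf("doing the unlink work\n");

	release("rgrp");
out_rgrp:
	release("child");
out_child:
	release("parent");
out_parent:
	return error;
}

int main(void)
{
	demo_unlink(2);		/* fail while taking the rgrp lock */
	demo_unlink(-1);	/* take all three, then unwind in reverse */
	return 0;
}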
@@ -894,12 +905,17 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
static int setattr_size(struct inode *inode, struct iattr *attr)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
int error;
if (attr->ia_size != ip->i_di.di_size) {
- error = vmtruncate(inode, attr->ia_size);
+ error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (error)
return error;
+ error = vmtruncate(inode, attr->ia_size);
+ gfs2_trans_end(sdp);
+ if (error)
+ return error;
}
error = gfs2_truncatei(ip, attr->ia_size);
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 603d940..950f314 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -92,7 +92,6 @@ static void gfs2_put_super(struct super_block *sb)
kthread_stop(sdp->sd_recoverd_process);
while (sdp->sd_glockd_num--)
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
- kthread_stop(sdp->sd_scand_process);
if (!(sb->s_flags & MS_RDONLY)) {
error = gfs2_make_fs_ro(sdp);
@@ -456,12 +455,15 @@ static void gfs2_delete_inode(struct inode *inode)
}
error = gfs2_dinode_dealloc(ip);
- /*
- * Must do this before unlock to avoid trying to write back
- * potentially dirty data now that inode no longer exists
- * on disk.
- */
+ if (error)
+ goto out_unlock;
+
+ error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
+ if (error)
+ goto out_unlock;
+ /* Needs to be done before glock release & also in a transaction */
truncate_inode_pages(&inode->i_data, 0);
+ gfs2_trans_end(sdp);
out_unlock:
gfs2_glock_dq(&ip->i_iopen_gh);
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 404b7cc..927d739 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -27,13 +27,12 @@
#include "trans.h"
#include "util.h"
-static struct page *gfs2_private_nopage(struct vm_area_struct *area,
- unsigned long address, int *type)
+static int gfs2_private_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
+ struct gfs2_inode *ip = GFS2_I(vma->vm_file->f_mapping->host);
set_bit(GIF_PAGED, &ip->i_flags);
- return filemap_nopage(area, address, type);
+ return filemap_fault(vma, vmf);
}
static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
@@ -104,58 +103,67 @@ out:
return error;
}
-static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
- unsigned long address, int *type)
+static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
- struct file *file = area->vm_file;
+ struct file *file = vma->vm_file;
struct gfs2_file *gf = file->private_data;
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_holder i_gh;
- struct page *result = NULL;
- unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
- area->vm_pgoff;
int alloc_required;
int error;
+ int ret = 0;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
- return NULL;
+ goto out;
set_bit(GIF_PAGED, &ip->i_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
- error = gfs2_write_alloc_required(ip, (u64)index << PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, &alloc_required);
- if (error)
- goto out;
+ error = gfs2_write_alloc_required(ip,
+ (u64)vmf->pgoff << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE, &alloc_required);
+ if (error) {
+ ret = VM_FAULT_OOM; /* XXX: are these right? */
+ goto out_unlock;
+ }
set_bit(GFF_EXLOCK, &gf->f_flags);
- result = filemap_nopage(area, address, type);
+ ret = filemap_fault(vma, vmf);
clear_bit(GFF_EXLOCK, &gf->f_flags);
- if (!result || result == NOPAGE_OOM)
- goto out;
+ if (ret & VM_FAULT_ERROR)
+ goto out_unlock;
if (alloc_required) {
- error = alloc_page_backing(ip, result);
+ /* XXX: do we need to drop page lock around alloc_page_backing? */
+ error = alloc_page_backing(ip, vmf->page);
if (error) {
- page_cache_release(result);
- result = NULL;
- goto out;
+ /*
+ * VM_FAULT_LOCKED should always be the case for
+ * filemap_fault, but it may not be in a future
+ * implementation.
+ */
+ if (ret & VM_FAULT_LOCKED)
+ unlock_page(vmf->page);
+ page_cache_release(vmf->page);
+ ret = VM_FAULT_OOM;
+ goto out_unlock;
}
- set_page_dirty(result);
+ set_page_dirty(vmf->page);
}
-out:
+out_unlock:
gfs2_glock_dq_uninit(&i_gh);
-
- return result;
+out:
+ return ret;
}
struct vm_operations_struct gfs2_vm_ops_private = {
- .nopage = gfs2_private_nopage,
+ .fault = gfs2_private_fault,
};
struct vm_operations_struct gfs2_vm_ops_sharewrite = {
- .nopage = gfs2_sharewrite_nopage,
+ .fault = gfs2_sharewrite_fault,
};
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6e546ee..addb51e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -70,6 +70,7 @@ struct gfs2_quota_host {
u64 qu_limit;
u64 qu_warn;
s64 qu_value;
+ u32 qu_ll_next;
};
struct gfs2_quota_change_host {
@@ -580,6 +581,7 @@ static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
qu->qu_limit = be64_to_cpu(str->qu_limit);
qu->qu_warn = be64_to_cpu(str->qu_warn);
qu->qu_value = be64_to_cpu(str->qu_value);
+ qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}
static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
@@ -589,6 +591,7 @@ static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
str->qu_limit = cpu_to_be64(qu->qu_limit);
str->qu_warn = cpu_to_be64(qu->qu_warn);
str->qu_value = cpu_to_be64(qu->qu_value);
+ str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}
@@ -614,6 +617,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 value;
int err = -EIO;
+ if (gfs2_is_stuffed(ip)) {
+ struct gfs2_alloc *al = NULL;
+ al = gfs2_alloc_get(ip);
+ /* just request 1 blk */
+ al->al_requested = 1;
+ gfs2_inplace_reserve(ip);
+ gfs2_unstuff_dinode(ip, NULL);
+ gfs2_inplace_release(ip);
+ gfs2_alloc_put(ip);
+ }
page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 5ada38c..beb6c7a 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -469,7 +469,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
};
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP, &ji_gh);
+ LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
if (error)
goto fail_gunlock_j;
} else {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index e4e0406..708c287 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -31,6 +31,7 @@
#include "inode.h"
#define BFITNOENT ((u32)~0)
+#define NO_BLOCK ((u64)~0)
/*
* These routines are used by the resource group routines (rgrp.c)
@@ -116,8 +117,7 @@ static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
* @buffer: the buffer that holds the bitmaps
* @buflen: the length (in bytes) of the buffer
* @goal: start search at this block's bit-pair (within @buffer)
- * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
- * bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
+ * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
*
* Scope of @goal and returned block number is only within this bitmap buffer,
* not entire rgrp or filesystem. @buffer will be offset from the actual
@@ -137,9 +137,13 @@ static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
byte = buffer + (goal / GFS2_NBBY);
bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
end = buffer + buflen;
- alloc = (old_state & 1) ? 0 : 0x55;
+ alloc = (old_state == GFS2_BLKST_FREE) ? 0x55 : 0;
while (byte < end) {
+ /* If we're looking for a free block we can eliminate all
+ bitmap settings with 0x55, which represents four data
+ blocks in a row. If we're looking for a data block, we can
+ eliminate 0x00 which corresponds to four free blocks. */
if ((*byte & 0x55) == alloc) {
blk += (8 - bit) >> 1;
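
Note: the new elimination test in gfs2_bitfit() relies on the bitmap layout of four blocks per byte, two bits each. When searching for a free block, a byte whose value ANDed with 0x55 equals 0x55 has the low bit set in every pair, so none of its four blocks can be free and the whole byte is skipped. A user-space sketch of that free-block case only; the names and the sample bitmap are illustrative.

/* Sketch only: free-block search case; sample bitmap values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define BLKS_PER_BYTE 4
#define BLKST_FREE    0

static int find_free(const uint8_t *bitmap, unsigned int bytes)
{
	unsigned int byte, pair, state;

	for (byte = 0; byte < bytes; byte++) {
		if ((bitmap[byte] & 0x55) == 0x55)
			continue;	/* all four blocks in use: skip the byte */
		for (pair = 0; pair < BLKS_PER_BYTE; pair++) {
			state = (bitmap[byte] >> (2 * pair)) & 3;
			if (state == BLKST_FREE)
				return byte * BLKS_PER_BYTE + pair;
		}
	}
	return -1;
}

int main(void)
{
	/* 0x55 and 0xff have no free pair; 0x51 has its second pair free */
	uint8_t bitmap[] = { 0x55, 0xff, 0x51, 0x55 };

	printf("first free block: %d\n", find_free(bitmap, sizeof(bitmap)));	/* 9 */
	return 0;
}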
@@ -859,20 +863,28 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
{
struct inode *inode;
- u32 goal = 0;
+ u32 goal = 0, block;
u64 no_addr;
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
for(;;) {
- goal = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
- GFS2_BLKST_UNLINKED);
- if (goal == 0)
- return 0;
- no_addr = goal + rgd->rd_data0;
- if (no_addr <= *last_unlinked)
+ if (goal >= rgd->rd_data)
+ break;
+ down_write(&sdp->sd_log_flush_lock);
+ block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
+ GFS2_BLKST_UNLINKED);
+ up_write(&sdp->sd_log_flush_lock);
+ if (block == BFITNOENT)
+ break;
+ /* rgblk_search can return a block < goal, so we need to
+ keep it marching forward. */
+ no_addr = block + rgd->rd_data0;
+ goal++;
+ if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
*last_unlinked = no_addr;
inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
- no_addr, -1);
+ no_addr, -1, 1);
if (!IS_ERR(inode))
return inode;
}
@@ -1149,7 +1161,7 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
struct gfs2_alloc *al = &ip->i_alloc;
struct inode *inode;
int error = 0;
- u64 last_unlinked = 0;
+ u64 last_unlinked = NO_BLOCK;
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
@@ -1286,7 +1298,9 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
allocatable block anywhere else, we want to be able wrap around and
search in the first part of our first-searched bit block. */
for (x = 0; x <= length; x++) {
- if (bi->bi_clone)
+ /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
+ bitmaps, so we must search the originals for that. */
+ if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
bi->bi_len, goal, old_state);
else
@@ -1302,9 +1316,7 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
goal = 0;
}
- if (old_state != new_state) {
- gfs2_assert_withdraw(rgd->rd_sbd, blk != BFITNOENT);
-
+ if (blk != BFITNOENT && old_state != new_state) {
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len, blk, new_state);
@@ -1313,7 +1325,7 @@ static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
bi->bi_len, blk, new_state);
}
- return (blk == BFITNOENT) ? 0 : (bi->bi_start * GFS2_NBBY) + blk;
+ return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
}
/**
@@ -1393,6 +1405,7 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip)
goal = rgd->rd_last_alloc_data;
blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
+ BUG_ON(blk == BFITNOENT);
rgd->rd_last_alloc_data = blk;
block = rgd->rd_data0 + blk;
@@ -1437,6 +1450,7 @@ u64 gfs2_alloc_meta(struct gfs2_inode *ip)
goal = rgd->rd_last_alloc_meta;
blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
+ BUG_ON(blk == BFITNOENT);
rgd->rd_last_alloc_meta = blk;
block = rgd->rd_data0 + blk;
@@ -1478,6 +1492,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
GFS2_BLKST_FREE, GFS2_BLKST_DINODE);
+ BUG_ON(blk == BFITNOENT);
rgd->rd_last_alloc_meta = blk;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index f916b97..dd3e737 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -58,7 +58,6 @@ void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_incore_log_blocks = 1024;
gt->gt_log_flush_secs = 60;
gt->gt_jindex_refresh_secs = 60;
- gt->gt_scand_secs = 15;
gt->gt_recoverd_secs = 60;
gt->gt_logd_secs = 1;
gt->gt_quotad_secs = 5;
@@ -160,18 +159,15 @@ int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
}
-static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
+static void end_bio_io_page(struct bio *bio, int error)
{
struct page *page = bio->bi_private;
- if (bio->bi_size)
- return 1;
if (!error)
SetPageUptodate(page);
else
printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
unlock_page(page);
- return 0;
}
static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c26c21b..06e0b77 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -222,7 +222,6 @@ static struct kobj_type gfs2_ktype = {
};
static struct kset gfs2_kset = {
- .kobj = {.name = "gfs2"},
.ktype = &gfs2_ktype,
};
@@ -442,7 +441,6 @@ TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(quota_cache_secs, 1);
TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
-TUNE_ATTR_DAEMON(scand_secs, scand_process);
TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
TUNE_ATTR_DAEMON(logd_secs, logd_process);
TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
@@ -464,7 +462,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_quota_cache_secs.attr,
&tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
- &tune_attr_scand_secs.attr,
&tune_attr_recoverd_secs.attr,
&tune_attr_logd_secs.attr,
&tune_attr_quotad_secs.attr,
@@ -553,6 +550,7 @@ int gfs2_sys_init(void)
{
gfs2_sys_margs = NULL;
spin_lock_init(&gfs2_sys_margs_lock);
+ kobject_set_name(&gfs2_kset.kobj, "gfs2");
kobj_set_kset_s(&gfs2_kset, fs_subsys);
return kset_register(&gfs2_kset);
}
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index f8dabf8..717983e 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -142,25 +142,25 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
lops_add(sdp, &bd->bd_le);
}
-void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno)
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
- struct gfs2_revoke *rv = kmalloc(sizeof(struct gfs2_revoke),
- GFP_NOFS | __GFP_NOFAIL);
- lops_init_le(&rv->rv_le, &gfs2_revoke_lops);
- rv->rv_blkno = blkno;
- lops_add(sdp, &rv->rv_le);
+ BUG_ON(!list_empty(&bd->bd_le.le_list));
+ BUG_ON(!list_empty(&bd->bd_ail_st_list));
+ BUG_ON(!list_empty(&bd->bd_ail_gl_list));
+ lops_init_le(&bd->bd_le, &gfs2_revoke_lops);
+ lops_add(sdp, &bd->bd_le);
}
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
{
- struct gfs2_revoke *rv;
+ struct gfs2_bufdata *bd;
int found = 0;
gfs2_log_lock(sdp);
- list_for_each_entry(rv, &sdp->sd_log_le_revoke, rv_le.le_list) {
- if (rv->rv_blkno == blkno) {
- list_del(&rv->rv_le.le_list);
+ list_for_each_entry(bd, &sdp->sd_log_le_revoke, bd_le.le_list) {
+ if (bd->bd_blkno == blkno) {
+ list_del_init(&bd->bd_le.le_list);
gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
sdp->sd_log_num_revoke--;
found = 1;
@@ -172,7 +172,7 @@ void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
if (found) {
struct gfs2_trans *tr = current->journal_info;
- kfree(rv);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
tr->tr_num_revoke_rm++;
}
}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 23d4cbe..043d5f4 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -32,7 +32,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp);
void gfs2_trans_add_gl(struct gfs2_glock *gl);
void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
-void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno);
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno);
void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 92cf875..6c5f92d 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -443,7 +443,7 @@ static int __init init_hfs_fs(void)
hfs_inode_cachep = kmem_cache_create("hfs_inode_cache",
sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN,
- hfs_init_once, NULL);
+ hfs_init_once);
if (!hfs_inode_cachep)
return -ENOMEM;
err = register_filesystem(&hfs_fs_type);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 6d87a2a..7b0f2e5 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -479,7 +479,7 @@ static int __init init_hfsplus_fs(void)
hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
- hfsplus_init_once, NULL);
+ hfsplus_init_once);
if (!hfsplus_inode_cachep)
return -ENOMEM;
err = register_filesystem(&hfsplus_fs_type);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 29cc34a..89612ee 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -181,14 +181,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
mutex_init(&ei->i_parent_mutex);
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
sizeof(struct hpfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (hpfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d145cb7..950c2fb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
int ret;
/*
- * vma alignment has already been checked by prepare_hugepage_range.
- * If you add any error returns here, do so after setting VM_HUGETLB,
- * so is_vm_hugetlb_page tests below unmap_region go the right way
- * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+ * vma address alignment (but not the pgoff alignment) has
+ * already been checked by prepare_hugepage_range. If you add
+ * any error returns here, do so after setting VM_HUGETLB, so
+ * is_vm_hugetlb_page tests below unmap_region go the right
+ * way when do_mmap_pgoff unwinds (may be important on powerpc
+ * and ia64).
*/
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
+ if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+ return -EINVAL;
+
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
mutex_lock(&inode->i_mutex);
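
Note: the added check rejects mappings whose file offset is not hugepage aligned: vm_pgoff counts PAGE_SIZE units, so masking with ~(HPAGE_MASK >> PAGE_SHIFT) leaves a nonzero result whenever pgoff is not a multiple of HPAGE_SIZE/PAGE_SIZE. A user-space sketch with example shift values (4 KiB base pages, 2 MiB huge pages), not taken from any particular architecture's headers.

/* Sketch only: the shift values are examples, not architecture headers. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT  12UL			/* 4 KiB base pages */
#define DEMO_HPAGE_SHIFT 21UL			/* 2 MiB huge pages */
#define DEMO_HPAGE_SIZE  (1UL << DEMO_HPAGE_SHIFT)
#define DEMO_HPAGE_MASK  (~(DEMO_HPAGE_SIZE - 1))

static int pgoff_ok(unsigned long pgoff)
{
	/* same expression the patch adds: nonzero means misaligned */
	return !(pgoff & ~(DEMO_HPAGE_MASK >> DEMO_PAGE_SHIFT));
}

int main(void)
{
	printf("pgoff 0:   %s\n", pgoff_ok(0)   ? "ok" : "-EINVAL");
	printf("pgoff 512: %s\n", pgoff_ok(512) ? "ok" : "-EINVAL");	/* 2 MiB into the file */
	printf("pgoff 513: %s\n", pgoff_ok(513) ? "ok" : "-EINVAL");	/* not hugepage aligned */
	return 0;
}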
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
@@ -848,7 +853,7 @@ static int __init init_hugetlbfs_fs(void)
hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
sizeof(struct hugetlbfs_inode_info),
- 0, 0, init_once, NULL);
+ 0, 0, init_once);
if (hugetlbfs_inode_cachep == NULL)
return -ENOMEM;
diff --git a/fs/inode.c b/fs/inode.c
index 320e088..29f5068 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1388,8 +1388,7 @@ void __init inode_init(unsigned long mempages)
0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
- init_once,
- NULL);
+ init_once);
register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 9f2224f..9bf2f6c 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -716,10 +716,10 @@ static int __init inotify_user_setup(void)
watch_cachep = kmem_cache_create("inotify_watch_cache",
sizeof(struct inotify_user_watch),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
event_cachep = kmem_cache_create("inotify_event_cache",
sizeof(struct inotify_kernel_event),
- 0, SLAB_PANIC, NULL, NULL);
+ 0, SLAB_PANIC, NULL);
return 0;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 4f5418b..043b470 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -86,7 +86,7 @@ static int init_inodecache(void)
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (isofs_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -846,6 +846,15 @@ root_found:
goto out_no_root;
if (!inode->i_op)
goto out_bad_root;
+
+ /* Make sure the root inode is a directory */
+ if (!S_ISDIR(inode->i_mode)) {
+ printk(KERN_WARNING
+ "isofs_fill_super: root inode is not a directory. "
+ "Corrupted media?\n");
+ goto out_iput;
+ }
+
/* get the root dentry */
s->s_root = d_alloc_root(inode);
if (!(s->s_root))
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 46fe743..06ab3c1 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1668,7 +1668,7 @@ static int journal_create_jbd_slab(size_t slab_size)
* boundary.
*/
jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
- slab_size, slab_size, 0, NULL, NULL);
+ slab_size, slab_size, 0, NULL);
if (!jbd_slab[i]) {
printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
return -ENOMEM;
@@ -1711,8 +1711,7 @@ static int journal_init_journal_head_cache(void)
sizeof(struct journal_head),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
retval = 0;
if (journal_head_cache == 0) {
retval = -ENOMEM;
@@ -2008,8 +2007,7 @@ static int __init journal_init_handle_cache(void)
sizeof(handle_t),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
if (jbd_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 8db2fa2..62e13c8 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -170,13 +170,13 @@ int __init journal_init_revoke_caches(void)
{
revoke_record_cache = kmem_cache_create("revoke_record",
sizeof(struct jbd_revoke_record_s),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (revoke_record_cache == 0)
return -ENOMEM;
revoke_table_cache = kmem_cache_create("revoke_table",
sizeof(struct jbd_revoke_table_s),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (revoke_table_cache == 0) {
kmem_cache_destroy(revoke_record_cache);
revoke_record_cache = NULL;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 78d63b8..f37324a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -528,7 +529,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
spin_lock(&journal->j_state_lock);
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
@@ -1679,7 +1680,7 @@ static int jbd2_journal_create_jbd_slab(size_t slab_size)
* boundary.
*/
jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
- slab_size, slab_size, 0, NULL, NULL);
+ slab_size, slab_size, 0, NULL);
if (!jbd_slab[i]) {
printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
return -ENOMEM;
@@ -1709,7 +1710,7 @@ void jbd2_slab_free(void *ptr, size_t size)
* Journal_head storage management
*/
static struct kmem_cache *jbd2_journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
@@ -1722,8 +1723,7 @@ static int journal_init_jbd2_journal_head_cache(void)
sizeof(struct journal_head),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
retval = 0;
if (jbd2_journal_head_cache == 0) {
retval = -ENOMEM;
@@ -1747,7 +1747,7 @@ static struct journal_head *journal_alloc_journal_head(void)
struct journal_head *ret;
static unsigned long last_warning;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
atomic_inc(&nr_journal_heads);
#endif
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -1768,7 +1768,7 @@ static struct journal_head *journal_alloc_journal_head(void)
static void journal_free_journal_head(struct journal_head *jh)
{
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
atomic_dec(&nr_journal_heads);
memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
@@ -1951,64 +1951,50 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
}
/*
- * /proc tunables
+ * debugfs tunables
*/
-#if defined(CONFIG_JBD_DEBUG)
-int jbd2_journal_enable_debug;
+#if defined(CONFIG_JBD2_DEBUG)
+u8 jbd2_journal_enable_debug;
EXPORT_SYMBOL(jbd2_journal_enable_debug);
#endif
-#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+#if defined(CONFIG_JBD2_DEBUG) && defined(CONFIG_DEBUG_FS)
-static struct proc_dir_entry *proc_jbd_debug;
+#define JBD2_DEBUG_NAME "jbd2-debug"
-static int read_jbd_debug(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int ret;
+struct dentry *jbd2_debugfs_dir, *jbd2_debug;
- ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
- *eof = 1;
- return ret;
+static void __init jbd2_create_debugfs_entry(void)
+{
+ jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
+ if (jbd2_debugfs_dir)
+ jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
+ jbd2_debugfs_dir,
+ &jbd2_journal_enable_debug);
}
-static int write_jbd_debug(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static void __exit jbd2_remove_debugfs_entry(void)
{
- char buf[32];
-
- if (count > ARRAY_SIZE(buf) - 1)
- count = ARRAY_SIZE(buf) - 1;
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
- buf[ARRAY_SIZE(buf) - 1] = '\0';
- jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
- return count;
+ if (jbd2_debug)
+ debugfs_remove(jbd2_debug);
+ if (jbd2_debugfs_dir)
+ debugfs_remove(jbd2_debugfs_dir);
}
-#define JBD_PROC_NAME "sys/fs/jbd2-debug"
+#else
-static void __init create_jbd_proc_entry(void)
+static void __init jbd2_create_debugfs_entry(void)
{
- proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
- if (proc_jbd_debug) {
- /* Why is this so hard? */
- proc_jbd_debug->read_proc = read_jbd_debug;
- proc_jbd_debug->write_proc = write_jbd_debug;
- }
+ do {
+ } while (0);
}
-static void __exit jbd2_remove_jbd_proc_entry(void)
+static void __exit jbd2_remove_debugfs_entry(void)
{
- if (proc_jbd_debug)
- remove_proc_entry(JBD_PROC_NAME, NULL);
+ do {
+ } while (0);
}
-#else
-
-#define create_jbd_proc_entry() do {} while (0)
-#define jbd2_remove_jbd_proc_entry() do {} while (0)
-
#endif
struct kmem_cache *jbd2_handle_cache;
@@ -2019,8 +2005,7 @@ static int __init journal_init_handle_cache(void)
sizeof(handle_t),
0, /* offset */
0, /* flags */
- NULL, /* ctor */
- NULL); /* dtor */
+ NULL); /* ctor */
if (jbd2_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
return -ENOMEM;
@@ -2067,18 +2052,18 @@ static int __init journal_init(void)
ret = journal_init_caches();
if (ret != 0)
jbd2_journal_destroy_caches();
- create_jbd_proc_entry();
+ jbd2_create_debugfs_entry();
return ret;
}
static void __exit journal_exit(void)
{
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
int n = atomic_read(&nr_journal_heads);
if (n)
printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
#endif
- jbd2_remove_jbd_proc_entry();
+ jbd2_remove_debugfs_entry();
jbd2_journal_destroy_caches();
}
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 395c92a..b50be8a 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -251,10 +251,10 @@ int jbd2_journal_recover(journal_t *journal)
if (!err)
err = do_one_pass(journal, &info, PASS_REPLAY);
- jbd_debug(0, "JBD: recovery, exit status %d, "
+ jbd_debug(1, "JBD: recovery, exit status %d, "
"recovered transactions %u to %u\n",
err, info.start_transaction, info.end_transaction);
- jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
+ jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
/* Restart the log at the next transaction ID, thus invalidating
@@ -295,10 +295,10 @@ int jbd2_journal_skip_recovery(journal_t *journal)
printk(KERN_ERR "JBD: error %d scanning journal\n", err);
++journal->j_transaction_sequence;
} else {
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
#endif
- jbd_debug(0,
+ jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
journal->j_transaction_sequence = ++info.end_transaction;
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 28cac04..01d8897 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -171,13 +171,13 @@ int __init jbd2_journal_init_revoke_caches(void)
{
jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
sizeof(struct jbd2_revoke_record_s),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (jbd2_revoke_record_cache == 0)
return -ENOMEM;
jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
sizeof(struct jbd2_revoke_table_s),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (jbd2_revoke_table_cache == 0) {
kmem_cache_destroy(jbd2_revoke_record_cache);
jbd2_revoke_record_cache = NULL;
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 143c553..504643f 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -84,7 +84,7 @@ static int jffs2_garbage_collect_thread(void *_c)
set_freezable();
for (;;) {
allow_signal(SIGHUP);
-
+ again:
if (!jffs2_thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
@@ -95,9 +95,6 @@ static int jffs2_garbage_collect_thread(void *_c)
schedule();
}
- if (try_to_freeze())
- continue;
-
/* This thread is purely an optimisation. But if it runs when
other things could be running, it actually makes things a
lot worse. Use yield() and put it at the back of the runqueue
@@ -112,6 +109,9 @@ static int jffs2_garbage_collect_thread(void *_c)
siginfo_t info;
unsigned long signr;
+ if (try_to_freeze())
+ goto again;
+
signr = dequeue_signal_lock(current, &current->blocked, &info);
switch(signr) {
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 1d3b7a9..8bc727b 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -627,7 +627,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct page *pg;
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
(void *)jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 35c1a5e..f921125 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -33,56 +33,56 @@ int __init jffs2_create_slab_caches(void)
{
full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
sizeof(struct jffs2_full_dnode),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!full_dnode_slab)
goto err;
raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
sizeof(struct jffs2_raw_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_dirent_slab)
goto err;
raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
sizeof(struct jffs2_raw_inode),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_inode_slab)
goto err;
tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
sizeof(struct jffs2_tmp_dnode_info),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!tmp_dnode_info_slab)
goto err;
raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!raw_node_ref_slab)
goto err;
node_frag_slab = kmem_cache_create("jffs2_node_frag",
sizeof(struct jffs2_node_frag),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!node_frag_slab)
goto err;
inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
sizeof(struct jffs2_inode_cache),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!inode_cache_slab)
goto err;
#ifdef CONFIG_JFFS2_FS_XATTR
xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
sizeof(struct jffs2_xattr_datum),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!xattr_datum_cache)
goto err;
xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
sizeof(struct jffs2_xattr_ref),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!xattr_ref_cache)
goto err;
#endif
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 25126a0..bc5509f 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -139,6 +139,11 @@ static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_nod
#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
+/* Dirent nodes should be REF_PRISTINE only if they are not a deletion
+ dirent. Deletion dirents should be REF_NORMAL so that GC gets to
+ throw them away when appropriate */
+#define dirent_node_state(rd) ( (je32_to_cpu((rd)->ino)?REF_PRISTINE:REF_NORMAL) )
+
/* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates
it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get
copied. If you need to do anything different to GC inode-less nodes, then
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 7b36378..b5baa35 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -104,7 +104,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
if (crc != tn->data_crc) {
JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
- ofs, tn->data_crc, crc);
+ ref_offset(ref), tn->data_crc, crc);
return 1;
}
@@ -613,7 +613,7 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
jeb->unchecked_size -= len;
c->used_size += len;
c->unchecked_size -= len;
- ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
+ ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
spin_unlock(&c->erase_completion_lock);
}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 2a1c976..6c75cd4 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -1049,7 +1049,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
return -ENOMEM;
}
- fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic);
+ fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
+ PAD(je32_to_cpu(rd->totlen)), ic);
fd->next = NULL;
fd->version = je32_to_cpu(rd->version);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index e220d3b..be2b70c2 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -192,7 +192,7 @@ static int __init init_jffs2_fs(void)
sizeof(struct jffs2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- jffs2_i_init_once, NULL);
+ jffs2_i_init_once);
if (!jffs2_inode_cachep) {
printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
return -ENOMEM;
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index c9fe0ab..664c164 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -173,6 +173,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
flash_ofs |= REF_NORMAL;
}
fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache);
+ if (IS_ERR(fn->raw)) {
+ void *hold_err = fn->raw;
+ /* Release the full_dnode which is now useless, and return */
+ jffs2_free_full_dnode(fn);
+ return ERR_PTR(PTR_ERR(hold_err));
+ }
fn->ofs = je32_to_cpu(ri->offset);
fn->size = je32_to_cpu(ri->dsize);
fn->frags = 0;
@@ -290,7 +296,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
- fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE, PAD(sizeof(*rd)+namelen), f->inocache);
+ fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd),
+ PAD(sizeof(*rd)+namelen), f->inocache);
+ if (IS_ERR(fd->raw)) {
+ void *hold_err = fd->raw;
+ /* Release the full_dirent which is now useless, and return */
+ jffs2_free_full_dirent(fd);
+ return ERR_PTR(PTR_ERR(hold_err));
+ }
if (retried) {
jffs2_dbg_acct_sanity_check(c,NULL);
@@ -553,6 +566,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
struct jffs2_full_dirent **prev = &dir_f->dents;
uint32_t nhash = full_name_hash(name, namelen);
+ /* We don't actually want to reserve any space, but we do
+ want to be holding the alloc_sem when we write to flash */
+ down(&c->alloc_sem);
down(&dir_f->sem);
while ((*prev) && (*prev)->nhash <= nhash) {
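
The two new IS_ERR() checks in jffs2_write_dnode() and jffs2_write_dirent() treat a failed jffs2_add_physical_node_ref() as an encoded errno, release the half-built node, and hand the same errno back as an error pointer. A compact, hypothetical sketch of that idiom:

#include <linux/err.h>

static struct jffs2_full_dnode *example_attach_ref(struct jffs2_full_dnode *fn,
						   struct jffs2_raw_node_ref *raw)
{
	if (IS_ERR(raw)) {
		/* the ref never existed; free our own allocation and
		 * re-encode the callee's errno for our caller */
		jffs2_free_full_dnode(fn);
		return ERR_PTR(PTR_ERR(raw));
	}
	fn->raw = raw;
	return fn;
}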
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index de3e4a5..ccfd029 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2162,7 +2162,7 @@ static void lbmStartIO(struct lbuf * bp)
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
bio->bi_size = 0;
- lbmIODone(bio, 0, 0);
+ lbmIODone(bio, 0);
} else {
submit_bio(WRITE_SYNC, bio);
INCREMENT(lmStat.submitted);
@@ -2200,16 +2200,13 @@ static int lbmIOWait(struct lbuf * bp, int flag)
*
* executed at INTIODONE level
*/
-static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
+static void lbmIODone(struct bio *bio, int error)
{
struct lbuf *bp = bio->bi_private;
struct lbuf *nextbp, *tail;
struct jfs_log *log;
unsigned long flags;
- if (bio->bi_size)
- return 1;
-
/*
* get back jfs buffer bound to the i/o buffer
*/
@@ -2237,8 +2234,6 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
-
- return 0;
}
/*
@@ -2263,7 +2258,6 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
if (bp->l_flag & lbmDIRECT) {
LCACHE_WAKEUP(&bp->l_ioevent);
LCACHE_UNLOCK(flags);
- return 0;
}
tail = log->wqueue;
@@ -2342,8 +2336,6 @@ static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
LCACHE_UNLOCK(flags); /* unlock+enable */
}
-
- return 0;
}
int jfsIOWait(void *arg)
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 77c7f11..941369c 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -213,7 +213,7 @@ int __init metapage_init(void)
* Allocate the metapage structures
*/
metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
- 0, 0, init_once, NULL);
+ 0, 0, init_once);
if (metapage_cache == NULL)
return -ENOMEM;
@@ -280,14 +280,10 @@ static void last_read_complete(struct page *page)
unlock_page(page);
}
-static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
- int err)
+static void metapage_read_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_read_end_io: I/O error\n");
SetPageError(page);
@@ -295,8 +291,6 @@ static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
dec_io(page, last_read_complete);
bio_put(bio);
-
- return 0;
}
static void remove_from_logsync(struct metapage *mp)
@@ -341,23 +335,18 @@ static void last_write_complete(struct page *page)
end_page_writeback(page);
}
-static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done,
- int err)
+static void metapage_write_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
BUG_ON(!PagePrivate(page));
- if (bio->bi_size)
- return 1;
-
if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
SetPageError(page);
}
dec_io(page, last_write_complete);
bio_put(bio);
- return 0;
}
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 929fcec..4b372f5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -776,7 +776,7 @@ static int __init init_jfs_fs(void)
jfs_inode_cachep =
kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (jfs_inode_cachep == NULL)
return -ENOMEM;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index b3efa45..d120ec39 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -171,19 +171,14 @@ found:
* GRANTED_RES message by cookie, without having to rely on the client's IP
* address. --okir
*/
-static inline struct nlm_block *
-nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
- struct nlm_lock *lock, struct nlm_cookie *cookie)
+static struct nlm_block *
+nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
+ struct nlm_file *file, struct nlm_lock *lock,
+ struct nlm_cookie *cookie)
{
struct nlm_block *block;
- struct nlm_host *host;
struct nlm_rqst *call = NULL;
- /* Create host handle for callback */
- host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
- if (host == NULL)
- return NULL;
-
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
@@ -335,10 +330,10 @@ static void nlmsvc_freegrantargs(struct nlm_rqst *call)
/*
* Deferred lock request handling for non-blocking lock
*/
-static u32
+static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
- u32 status = nlm_lck_denied_nolocks;
+ __be32 status = nlm_lck_denied_nolocks;
block->b_flags |= B_QUEUED;
@@ -352,7 +347,7 @@ nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
status = nlm_drop_reply;
}
dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
- block, block->b_flags, status);
+ block, block->b_flags, ntohl(status));
return status;
}
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
struct nlm_block *block = NULL;
+ struct nlm_host *host;
int error;
__be32 ret;
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
+ /* Create host handle for callback */
+ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+ if (host == NULL)
+ return nlm_lck_denied_nolocks;
/* Lock file against concurrent access */
mutex_lock(&file->f_mutex);
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
*/
block = nlmsvc_lookup_block(file, lock);
if (block == NULL) {
- block = nlmsvc_create_block(rqstp, file, lock, cookie);
+ block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
+ lock, cookie);
ret = nlm_lck_denied_nolocks;
if (block == NULL)
goto out;
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
+ nlm_release_host(host);
dprintk("lockd: nlmsvc_lock returned %u\n", ret);
return ret;
}
@@ -477,10 +479,17 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
if (block == NULL) {
struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ struct nlm_host *host;
if (conf == NULL)
return nlm_granted;
- block = nlmsvc_create_block(rqstp, file, lock, cookie);
+ /* Create host handle for callback */
+ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+ if (host == NULL) {
+ kfree(conf);
+ return nlm_lck_denied_nolocks;
+ }
+ block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
if (block == NULL) {
kfree(conf);
return nlm_granted;
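
With the host lookup hoisted out of nlmsvc_create_block(), the callers now own one host reference, pass a second one into the block via nlm_get_host(), and drop their own on every exit path. A condensed, hypothetical rendering of that reference discipline (function name is made up; the helpers are the ones used above):

static __be32 example_lock_path(struct svc_rqst *rqstp, struct nlm_file *file,
				struct nlm_lock *lock, struct nlm_cookie *cookie)
{
	struct nlm_host *host;
	struct nlm_block *block;
	__be32 ret = nlm_lck_denied_nolocks;

	host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
	if (host == NULL)
		return nlm_lck_denied_nolocks;

	/* the block keeps its own host reference; ours is released below */
	block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
				    lock, cookie);
	if (block != NULL)
		ret = nlm_granted;	/* the real code goes on to grant or queue */

	nlmsvc_release_block(block);	/* tolerates NULL */
	nlm_release_host(host);
	return ret;
}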
diff --git a/fs/locks.c b/fs/locks.c
index 431a8b8..c795eaa 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -458,22 +458,20 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
}
/* Allocate a file_lock initialised to this type of lease */
-static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
+static struct file_lock *lease_alloc(struct file *filp, int type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
if (fl == NULL)
- goto out;
+ return ERR_PTR(error);
error = lease_init(filp, type, fl);
if (error) {
locks_free_lock(fl);
- fl = NULL;
+ return ERR_PTR(error);
}
-out:
- *flp = fl;
- return error;
+ return fl;
}
/* Check if two locks overlap each other.
@@ -661,7 +659,7 @@ static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *w
return result;
}
-int
+void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
@@ -673,14 +671,12 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
if (posix_locks_conflict(cfl, fl))
break;
}
- if (cfl) {
+ if (cfl)
__locks_copy_lock(fl, cfl);
- unlock_kernel();
- return 1;
- } else
+ else
fl->fl_type = F_UNLCK;
unlock_kernel();
- return 0;
+ return;
}
EXPORT_SYMBOL(posix_test_lock);
@@ -786,7 +782,7 @@ find_conflict:
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
- locks_insert_lock(&inode->i_flock, new_fl);
+ locks_insert_lock(before, new_fl);
new_fl = NULL;
error = 0;
@@ -1169,9 +1165,9 @@ static void time_out_leases(struct inode *inode)
* @inode: the inode of the file to return
* @mode: the open mode (read or write)
*
- * break_lease (inlined for speed) has checked there already
- * is a lease on this file. Leases are broken on a call to open()
- * or truncate(). This function can sleep unless you
+ * break_lease (inlined for speed) has checked there already is at least
+ * some kind of lock (maybe a lease) on this file. Leases are broken on
+ * a call to open() or truncate(). This function can sleep unless you
* specified %O_NONBLOCK to your open().
*/
int __break_lease(struct inode *inode, unsigned int mode)
@@ -1179,12 +1175,10 @@ int __break_lease(struct inode *inode, unsigned int mode)
int error = 0, future;
struct file_lock *new_fl, *flock;
struct file_lock *fl;
- int alloc_err;
unsigned long break_time;
int i_have_this_lease = 0;
- alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
- &new_fl);
+ new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
lock_kernel();
@@ -1212,8 +1206,9 @@ int __break_lease(struct inode *inode, unsigned int mode)
goto out;
}
- if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
- error = alloc_err;
+ if (IS_ERR(new_fl) && !i_have_this_lease
+ && ((mode & O_NONBLOCK) == 0)) {
+ error = PTR_ERR(new_fl);
goto out;
}
@@ -1260,7 +1255,7 @@ restart:
out:
unlock_kernel();
- if (!alloc_err)
+ if (!IS_ERR(new_fl))
locks_free_lock(new_fl);
return error;
}
@@ -1329,7 +1324,7 @@ int fcntl_getlease(struct file *filp)
}
/**
- * __setlease - sets a lease on an open file
+ * generic_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
@@ -1339,18 +1334,24 @@ int fcntl_getlease(struct file *filp)
*
* Called with kernel lock held.
*/
-static int __setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
+ if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+ return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+ error = security_file_lock(filp, arg);
+ if (error)
+ return error;
+
time_out_leases(inode);
- error = -EINVAL;
- if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
- goto out;
+ BUG_ON(!(*flp)->fl_lmops->fl_break);
lease = *flp;
@@ -1418,39 +1419,49 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
out:
return error;
}
+EXPORT_SYMBOL(generic_setlease);
/**
- * setlease - sets a lease on an open file
+ * vfs_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @lease: file_lock to use
*
* Call this to establish a lease on the file.
- * The fl_lmops fl_break function is required by break_lease
+ * The (*lease)->fl_lmops->fl_break operation must be set; if not,
+ * break_lease will oops!
+ *
+ * This will call the filesystem's setlease file method, if
+ * defined. Note that there is no getlease method; instead, the
+ * filesystem setlease method should call back to setlease() to
+ * add a lease to the inode's lease list, where fcntl_getlease() can
+ * find it. Since fcntl_getlease() only reports whether the current
+ * task holds a lease, a cluster filesystem need only do this for
+ * leases held by processes on this node.
+ *
+ * There is also no break_lease method; filesystems that
+ * handle their own leases should break leases themselves from the
+ * filesystem's open, create, and (on truncate) setattr methods.
+ *
+ * Warning: the only current setlease methods exist only to disable
+ * leases in certain cases. More vfs changes may be required to
+ * allow a full filesystem lease implementation.
*/
-int setlease(struct file *filp, long arg, struct file_lock **lease)
+int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
int error;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
-
lock_kernel();
- error = __setlease(filp, arg, lease);
+ if (filp->f_op && filp->f_op->setlease)
+ error = filp->f_op->setlease(filp, arg, lease);
+ else
+ error = generic_setlease(filp, arg, lease);
unlock_kernel();
return error;
}
-
-EXPORT_SYMBOL(setlease);
+EXPORT_SYMBOL_GPL(vfs_setlease);
/**
* fcntl_setlease - sets a lease on an open file
@@ -1469,14 +1480,6 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
struct inode *inode = dentry->d_inode;
int error;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
-
locks_init_lock(&fl);
error = lease_init(filp, arg, &fl);
if (error)
@@ -1484,15 +1487,15 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
lock_kernel();
- error = __setlease(filp, arg, &flp);
+ error = vfs_setlease(filp, arg, &flp);
if (error || arg == F_UNLCK)
goto out_unlock;
error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
if (error < 0) {
- /* remove lease just inserted by __setlease */
+ /* remove lease just inserted by setlease */
flp->fl_type = F_UNLCK | F_INPROGRESS;
- flp->fl_break_time = jiffies- 10;
+ flp->fl_break_time = jiffies - 10;
time_out_leases(inode);
goto out_unlock;
}
@@ -1597,8 +1600,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
/**
* vfs_test_lock - test file byte range lock
* @filp: The file to test lock for
- * @fl: The lock to test
- * @conf: Place to return a copy of the conflicting lock, if found
+ * @fl: The lock to test; also used to hold result
*
* Returns -ERRNO on failure. Indicates presence of conflicting lock by
* setting conf->fl_type to something other than F_UNLCK.
@@ -2274,7 +2276,7 @@ static int __init filelock_init(void)
{
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC,
- init_once, NULL);
+ init_once);
return 0;
}
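
The split into generic_setlease() and vfs_setlease() lets a filesystem supply its own ->setlease file operation and still fall back to the generic in-memory lease list for the cases it can support. A hypothetical filesystem hook following the pattern described in the comment above (the NFS change later in this series simply returns -EINVAL instead):

static int examplefs_setlease(struct file *filp, long arg,
			      struct file_lock **flp)
{
	/* pretend this filesystem cannot honour write leases */
	if (arg == F_WRLCK)
		return -EINVAL;
	/* otherwise let the VFS keep the lease on the inode's list,
	 * where fcntl_getlease() and break_lease() will find it */
	return generic_setlease(filp, arg, flp);
}

static const struct file_operations examplefs_file_ops = {
	.setlease = examplefs_setlease,
	/* other methods elided */
};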
diff --git a/fs/mbcache.c b/fs/mbcache.c
index fbb1d02..1046cbe 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -292,7 +292,7 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
}
cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!cache->c_entry_cache)
goto fail;
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index be40446..43668d7 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -75,14 +75,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
minix_inode_cachep = kmem_cache_create("minix_inode_cache",
sizeof(struct minix_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (minix_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/mpage.c b/fs/mpage.c
index c1698f2..b1c3e58 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -39,14 +39,11 @@
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
-static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+static void mpage_end_io_read(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
do {
struct page *page = bvec->bv_page;
@@ -62,17 +59,13 @@ static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
unlock_page(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
- return 0;
}
-static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
+static void mpage_end_io_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
do {
struct page *page = bvec->bv_page;
@@ -87,7 +80,6 @@ static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
end_page_writeback(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
- return 0;
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
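
mpage, JFS's logmgr and the JFS metapage code all convert their bio completion handlers to the new prototype: the partial-completion check on bio->bi_size is gone and the handler returns void. A stripped-down sketch of the new shape (hypothetical handler; the real handlers above walk the whole bio_vec array):

#include <linux/bio.h>
#include <linux/pagemap.h>

static void example_end_io_read(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	/* the bio is now completed exactly once, for its full size */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		SetPageUptodate(page);
	else
		SetPageError(page);
	unlock_page(page);
	bio_put(bio);
}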
diff --git a/fs/namei.c b/fs/namei.c
index defaa47..a83160a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -107,6 +107,8 @@
* any extra contention...
*/
+static int fastcall link_path_walk(const char *name, struct nameidata *nd);
+
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
@@ -998,7 +1000,7 @@ return_err:
* Retry the whole path once, forcing real lookup requests
* instead of relying on the dcache.
*/
-int fastcall link_path_walk(const char *name, struct nameidata *nd)
+static int fastcall link_path_walk(const char *name, struct nameidata *nd)
{
struct nameidata save = *nd;
int result;
@@ -1022,7 +1024,7 @@ int fastcall link_path_walk(const char *name, struct nameidata *nd)
return result;
}
-int fastcall path_walk(const char * name, struct nameidata *nd)
+static int fastcall path_walk(const char * name, struct nameidata *nd)
{
current->total_link_count = 0;
return link_path_walk(name, nd);
@@ -1172,6 +1174,37 @@ int fastcall path_lookup(const char *name, unsigned int flags,
return do_path_lookup(AT_FDCWD, name, flags, nd);
}
+/**
+ * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
+ * @dentry: pointer to dentry of the base directory
+ * @mnt: pointer to vfs mount of the base directory
+ * @name: pointer to file name
+ * @flags: lookup flags
+ * @nd: pointer to nameidata
+ */
+int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, unsigned int flags,
+ struct nameidata *nd)
+{
+ int retval;
+
+ /* same as do_path_lookup */
+ nd->last_type = LAST_ROOT;
+ nd->flags = flags;
+ nd->depth = 0;
+
+ nd->mnt = mntget(mnt);
+ nd->dentry = dget(dentry);
+
+ retval = path_walk(name, nd);
+ if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
+ nd->dentry->d_inode))
+ audit_inode(name, nd->dentry->d_inode);
+
+ return retval;
+
+}
+
static int __path_lookup_intent_open(int dfd, const char *name,
unsigned int lookup_flags, struct nameidata *nd,
int open_flags, int create_mode)
@@ -2774,8 +2807,8 @@ EXPORT_SYMBOL(__page_symlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(path_lookup);
+EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL(path_release);
-EXPORT_SYMBOL(path_walk);
EXPORT_SYMBOL(permission);
EXPORT_SYMBOL(vfs_permission);
EXPORT_SYMBOL(file_permission);
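
The newly exported vfs_path_lookup() resolves a path relative to an explicit (dentry, vfsmount) pair rather than the caller's cwd. A hedged usage sketch; the base objects, the "logs/latest" path, and the helper name are invented for illustration:

#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/err.h>

static struct dentry *example_lookup_below(struct dentry *base,
					   struct vfsmount *mnt)
{
	struct nameidata nd;
	int err;

	err = vfs_path_lookup(base, mnt, "logs/latest", LOOKUP_FOLLOW, &nd);
	if (err)
		return ERR_PTR(err);

	mntput(nd.mnt);			/* keep the dentry, drop the mount ref */
	return nd.dentry;		/* caller now owns this reference */
}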
diff --git a/fs/namespace.c b/fs/namespace.c
index 4198003..ddbda13 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1801,7 +1801,7 @@ void __init mnt_init(unsigned long mempages)
init_rwsem(&namespace_sem);
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index cf06eb9..7f8536d 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -63,14 +63,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
mutex_init(&ei->open_mutex);
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
sizeof(struct ncp_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ncp_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 70a6911..a94473d 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -24,31 +24,35 @@
/*
* Fill in the supplied page for mmap
+ * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
+ * page?
*/
-static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
- unsigned long address, int *type)
+static int ncp_file_mmap_fault(struct vm_area_struct *area,
+ struct vm_fault *vmf)
{
struct file *file = area->vm_file;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
- struct page* page;
char *pg_addr;
unsigned int already_read;
unsigned int count;
int bufsize;
- int pos;
+ int pos; /* XXX: loff_t ? */
- page = alloc_page(GFP_HIGHUSER); /* ncpfs has nothing against high pages
- as long as recvmsg and memset works on it */
- if (!page)
- return page;
- pg_addr = kmap(page);
- address &= PAGE_MASK;
- pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
+ /*
+ * ncpfs has nothing against high pages as long
+	 * as recvmsg and memset work on it
+ */
+ vmf->page = alloc_page(GFP_HIGHUSER);
+ if (!vmf->page)
+ return VM_FAULT_OOM;
+ pg_addr = kmap(vmf->page);
+ pos = vmf->pgoff << PAGE_SHIFT;
count = PAGE_SIZE;
- if (address + PAGE_SIZE > area->vm_end) {
- count = area->vm_end - address;
+ if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) {
+ WARN_ON(1); /* shouldn't happen? */
+ count = area->vm_end - (unsigned long)vmf->virtual_address;
}
/* what we can read in one go */
bufsize = NCP_SERVER(inode)->buffer_size;
@@ -83,23 +87,21 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
if (already_read < PAGE_SIZE)
memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
- flush_dcache_page(page);
- kunmap(page);
+ flush_dcache_page(vmf->page);
+ kunmap(vmf->page);
/*
* If I understand ncp_read_kernel() properly, the above always
* fetches from the network, here the analogue of disk.
* -- wli
*/
- if (type)
- *type = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
- return page;
+ return VM_FAULT_MAJOR;
}
static struct vm_operations_struct ncp_file_mmap =
{
- .nopage = ncp_file_mmap_nopage,
+ .fault = ncp_file_mmap_fault,
};
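
ncp_file_mmap_nopage() becomes ncp_file_mmap_fault(): instead of returning a page (or NULL), the handler stores the page in vmf->page and returns a VM_FAULT_* code, computing the file offset from vmf->pgoff. A bare-bones hypothetical handler with the same contract:

#include <linux/mm.h>
#include <linux/highmem.h>

static int examplefs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (!page)
		return VM_FAULT_OOM;

	clear_highpage(page);		/* stand-in for filling in real data */
	vmf->page = page;		/* the VM consumes this reference */
	return VM_FAULT_MAJOR;		/* "did I/O"; otherwise return 0 */
}

static struct vm_operations_struct examplefs_vm_ops = {
	.fault = examplefs_fault,
};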
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 551e0ba..df6d60b 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -726,9 +726,6 @@ ncp_del_file_or_subdir2(struct ncp_server *server,
__le32 dirent;
if (!inode) {
-#ifdef CONFIG_NCPFS_DEBUGDENTRY
- PRINTK("ncpfs: ncpdel2: dentry->d_inode == NULL\n");
-#endif
return 0xFF; /* Any error */
}
volnum = NCP_FINFO(inode)->volNumber;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 849a202..058ade7 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -179,7 +179,7 @@ static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr
args->addr = svc_addr_in(rqstp);
status = decode_bitmap(xdr, args->bitmap);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+ dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
return status;
}
@@ -200,7 +200,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args->truncate = ntohl(*p);
status = decode_fh(xdr, &args->fh);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+ dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
return status;
}
@@ -349,7 +349,7 @@ static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
+ dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
return status;
}
@@ -392,7 +392,7 @@ static __be32 process_op(struct svc_rqst *rqstp,
status = res;
if (op->encode_res != NULL && status == 0)
status = op->encode_res(rqstp, xdr_out, resp);
- dprintk("%s: done, status = %d\n", __FUNCTION__, status);
+ dprintk("%s: done, status = %d\n", __FUNCTION__, ntohl(status));
return status;
}
@@ -431,7 +431,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
}
*hdr_res.status = status;
*hdr_res.nops = htonl(nops);
- dprintk("%s: done, status = %u\n", __FUNCTION__, status);
+ dprintk("%s: done, status = %u\n", __FUNCTION__, ntohl(status));
return rpc_success;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a49f9fe..a204484 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
- if (clp->cl_nfsversion == 3) {
- if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
- server->namelen = NFS3_MAXNAMLEN;
- if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
- server->caps |= NFS_CAP_READDIRPLUS;
- } else {
- if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
- server->namelen = NFS2_MAXNAMLEN;
- }
-
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
+ if (server->nfs_client->rpc_ops->version == 3) {
+ if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+ server->namelen = NFS3_MAXNAMLEN;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ } else {
+ if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+ server->namelen = NFS2_MAXNAMLEN;
+ }
+
if (!(fattr.valid & NFS_ATTR_FATTR)) {
error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
if (error < 0) {
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
if (error < 0)
goto error;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
BUG_ON(!server->nfs_client);
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
dprintk("Referral FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (error < 0)
goto out_free_server;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
dprintk("Cloned FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 20ac403..c55a761 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -20,10 +20,8 @@
#include "delegation.h"
#include "internal.h"
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
- if (delegation->cred)
- put_rpccred(delegation->cred);
kfree(delegation);
}
@@ -31,7 +29,18 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
{
struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
- nfs_free_delegation(delegation);
+ nfs_do_free_delegation(delegation);
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+ struct rpc_cred *cred;
+
+ cred = rcu_dereference(delegation->cred);
+ rcu_assign_pointer(delegation->cred, NULL);
+ call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+ if (cred)
+ put_rpccred(cred);
}
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
@@ -166,7 +175,7 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
int res = 0;
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
- call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+ nfs_free_delegation(delegation);
return res;
}
@@ -448,7 +457,7 @@ restart:
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
- call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+ nfs_free_delegation(delegation);
goto restart;
}
rcu_read_unlock();
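
nfs_free_delegation() now detaches the credential immediately and only defers the kfree() of the delegation itself behind an RCU grace period, so lockless readers can still dereference the structure while the cred reference is dropped right away. The same split on a hypothetical object:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>

struct example_obj {
	struct rpc_cred *cred;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

static void example_free(struct example_obj *obj)
{
	struct rpc_cred *cred = rcu_dereference(obj->cred);

	rcu_assign_pointer(obj->cred, NULL);	/* readers now see no cred */
	call_rcu(&obj->rcu, example_free_rcu);	/* memory freed after grace period */
	if (cred)
		put_rpccred(cred);		/* cred dropped immediately */
}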
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 322141f..e4a04d1 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -654,7 +654,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
if (IS_ROOT(dentry))
return 1;
- verf = (unsigned long)dentry->d_fsdata;
+ verf = dentry->d_time;
if (nfs_caches_unstable(dir)
|| verf != NFS_I(dir)->cache_change_attribute)
return 0;
@@ -663,7 +663,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
{
- dentry->d_fsdata = (void *)verf;
+ dentry->d_time = verf;
}
static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf)
@@ -869,7 +869,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
lock_kernel();
drop_nlink(inode);
- nfs_complete_unlink(dentry);
+ nfs_complete_unlink(dentry, inode);
unlock_kernel();
}
/* When creating a negative dentry, we want to renew d_time */
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
}
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
+ if (name.len > NFS_SERVER(dir)->namelen)
+ return NULL;
/* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
@@ -1411,7 +1413,7 @@ static int nfs_sillyrename(struct inode *dir, struct dentry *dentry)
nfs_renew_times(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
d_move(dentry, sdentry);
- error = nfs_async_unlink(dentry);
+ error = nfs_async_unlink(dir, dentry);
/* If we return 0 we don't unlink */
}
dput(sdentry);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index a5c82b6..fcf4d38 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -875,7 +875,7 @@ int __init nfs_init_directcache(void)
sizeof(struct nfs_direct_req),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- NULL, NULL);
+ NULL);
if (nfs_direct_cachep == NULL)
return -ENOMEM;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8689b73..579cf8a 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -53,6 +53,7 @@ static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
static int nfs_check_flags(int flags);
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
@@ -69,6 +70,7 @@ const struct file_operations nfs_file_operations = {
.flock = nfs_flock,
.splice_read = nfs_file_splice_read,
.check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
};
const struct inode_operations nfs_file_inode_operations = {
@@ -314,7 +316,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
- nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
+ nfs_wb_page_cancel(page->mapping->host, page);
}
static int nfs_release_page(struct page *page, gfp_t gfp)
@@ -400,7 +402,9 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
lock_kernel();
/* Try local locking first */
- if (posix_test_lock(filp, fl)) {
+ posix_test_lock(filp, fl);
+ if (fl->fl_type != F_UNLCK) {
+ /* found a conflict */
goto out;
}
@@ -558,3 +562,13 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
return do_unlk(filp, cmd, fl);
return do_setlk(filp, cmd, fl);
}
+
+static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+{
+ /*
+ * There is no protocol support for leases, so we have no way
+ * to implement them correctly in the face of opens by other
+ * clients.
+ */
+ return -EINVAL;
+}
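
do_getlk() above shows the new posix_test_lock() calling convention: it returns void, and the caller inspects fl->fl_type, which is either F_UNLCK or a copy of the conflicting lock. A one-line wrapper (hypothetical) making the contract explicit:

static int example_posix_conflict(struct file *filp, struct file_lock *fl)
{
	posix_test_lock(filp, fl);
	/* fl now either says F_UNLCK or describes the conflicting lock */
	return fl->fl_type != F_UNLCK;
}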
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index d1cbf0a..522e5ad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -175,6 +175,9 @@ next_component:
path++;
name.len = path - (const char *) name.name;
+ if (name.len > NFS4_MAXNAMLEN)
+ return -ENAMETOOLONG;
+
eat_dot_dir:
while (*path == '/')
path++;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 3d9fccf..71a49c3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -468,7 +468,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
ctx->lockowner = current->files;
ctx->error = 0;
ctx->dir_cookie = 0;
- kref_init(&ctx->kref);
+ atomic_set(&ctx->count, 1);
}
return ctx;
}
@@ -476,21 +476,18 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
if (ctx != NULL)
- kref_get(&ctx->kref);
+ atomic_inc(&ctx->count);
return ctx;
}
-static void nfs_free_open_context(struct kref *kref)
+void put_nfs_open_context(struct nfs_open_context *ctx)
{
- struct nfs_open_context *ctx = container_of(kref,
- struct nfs_open_context, kref);
+ struct inode *inode = ctx->path.dentry->d_inode;
- if (!list_empty(&ctx->list)) {
- struct inode *inode = ctx->path.dentry->d_inode;
- spin_lock(&inode->i_lock);
- list_del(&ctx->list);
- spin_unlock(&inode->i_lock);
- }
+ if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+ return;
+ list_del(&ctx->list);
+ spin_unlock(&inode->i_lock);
if (ctx->state != NULL)
nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
if (ctx->cred != NULL)
@@ -500,11 +497,6 @@ static void nfs_free_open_context(struct kref *kref)
kfree(ctx);
}
-void put_nfs_open_context(struct nfs_open_context *ctx)
-{
- kref_put(&ctx->kref, nfs_free_open_context);
-}
-
/*
* Ensure that mmap has a recent RPC credential for use when writing out
* shared pages
@@ -1165,14 +1157,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
nfsi->npages = 0;
nfs4_init_once(nfsi);
}
-
+
static int __init nfs_init_inodecache(void)
{
nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
sizeof(struct nfs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (nfs_inode_cachep == NULL)
return -ENOMEM;
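
put_nfs_open_context() drops the kref in favour of a bare atomic_t so the final put can take inode->i_lock atomically as the count reaches zero; the context is then unhashed and freed with no window in which a zero-count context is still visible on the inode's list. The idiom on a hypothetical structure:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct example_ctx {
	atomic_t count;
	struct list_head list;		/* protected by *guard */
	spinlock_t *guard;		/* in the NFS case: &inode->i_lock */
};

static void example_put(struct example_ctx *ctx)
{
	/* returns with the lock held only when the count hit zero */
	if (!atomic_dec_and_lock(&ctx->count, ctx->guard))
		return;
	list_del(&ctx->list);
	spin_unlock(ctx->guard);
	kfree(ctx);
}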
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 7f86e65..acfc56f 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -175,10 +175,8 @@ static void nfs_expire_automounts(struct work_struct *work)
void nfs_release_automount_timer(void)
{
- if (list_empty(&nfs_automount_list)) {
+ if (list_empty(&nfs_automount_list))
cancel_delayed_work(&nfs_automount_task);
- flush_scheduled_work();
- }
}
/*
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 7fcc78f..c5fce75 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -43,6 +43,7 @@
#define NFS_entry_sz (NFS_filename_sz+3)
#define NFS_diropargs_sz (NFS_fhandle_sz+NFS_filename_sz)
+#define NFS_removeargs_sz (NFS_fhandle_sz+NFS_filename_sz)
#define NFS_sattrargs_sz (NFS_fhandle_sz+NFS_sattr_sz)
#define NFS_readlinkargs_sz (NFS_fhandle_sz)
#define NFS_readargs_sz (NFS_fhandle_sz+3)
@@ -66,7 +67,7 @@
* Common NFS XDR functions as inlines
*/
static inline __be32 *
-xdr_encode_fhandle(__be32 *p, struct nfs_fh *fhandle)
+xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
{
memcpy(p, fhandle->data, NFS2_FHSIZE);
return p + XDR_QUADLEN(NFS2_FHSIZE);
@@ -204,7 +205,7 @@ nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
/*
* Encode directory ops argument
- * LOOKUP, REMOVE, RMDIR
+ * LOOKUP, RMDIR
*/
static int
nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
@@ -216,6 +217,18 @@ nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
}
/*
+ * Encode REMOVE argument
+ */
+static int
+nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+{
+ p = xdr_encode_fhandle(p, args->fh);
+ p = xdr_encode_array(p, args->name.name, args->name.len);
+ req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ return 0;
+}
+
+/*
* Arguments to a READ call. Since we read data directly into the page
* cache, we also set up the reply iovec here so that iov[1] points
* exactly to the page we want to fetch.
@@ -705,7 +718,7 @@ struct rpc_procinfo nfs_procedures[] = {
PROC(READ, readargs, readres, 3),
PROC(WRITE, writeargs, writeres, 4),
PROC(CREATE, createargs, diropres, 0),
- PROC(REMOVE, diropargs, stat, 0),
+ PROC(REMOVE, removeargs, stat, 0),
PROC(RENAME, renameargs, stat, 0),
PROC(LINK, linkargs, stat, 0),
PROC(SYMLINK, symlinkargs, stat, 0),
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 814d886..c7ca5d7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -349,62 +349,42 @@ out:
static int
nfs3_proc_remove(struct inode *dir, struct qstr *name)
{
- struct nfs_fattr dir_attr;
- struct nfs3_diropargs arg = {
- .fh = NFS_FH(dir),
- .name = name->name,
- .len = name->len
+ struct nfs_removeargs arg = {
+ .fh = NFS_FH(dir),
+ .name.len = name->len,
+ .name.name = name->name,
};
- struct rpc_message msg = {
- .rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE],
- .rpc_argp = &arg,
- .rpc_resp = &dir_attr,
+ struct nfs_removeres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
};
int status;
dprintk("NFS call remove %s\n", name->name);
- nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(&res.dir_attr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_post_op_update_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &res.dir_attr);
dprintk("NFS reply remove: %d\n", status);
return status;
}
-static int
-nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+static void
+nfs3_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
- struct unlinkxdr {
- struct nfs3_diropargs arg;
- struct nfs_fattr res;
- } *ptr;
-
- ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
- ptr->arg.fh = NFS_FH(dir->d_inode);
- ptr->arg.name = name->name;
- ptr->arg.len = name->len;
- nfs_fattr_init(&ptr->res);
msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
- msg->rpc_argp = &ptr->arg;
- msg->rpc_resp = &ptr->res;
- return 0;
}
static int
-nfs3_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
- struct rpc_message *msg = &task->tk_msg;
- struct nfs_fattr *dir_attr;
-
- if (nfs3_async_handle_jukebox(task, dir->d_inode))
- return 1;
- if (msg->rpc_argp) {
- dir_attr = (struct nfs_fattr*)msg->rpc_resp;
- nfs_post_op_update_inode(dir->d_inode, dir_attr);
- kfree(msg->rpc_argp);
- }
- return 0;
+ struct nfs_removeres *res;
+ if (nfs3_async_handle_jukebox(task, dir))
+ return 0;
+ res = task->tk_msg.rpc_resp;
+ nfs_post_op_update_inode(dir, &res->dir_attr);
+ return 1;
}
static int
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index b4647a2..d9e08f0 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -50,6 +50,7 @@
#define NFS3_sattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
+#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
#define NFS3_accessargs_sz (NFS3_fh_sz+1)
#define NFS3_readlinkargs_sz (NFS3_fh_sz)
#define NFS3_readargs_sz (NFS3_fh_sz+3)
@@ -65,6 +66,7 @@
#define NFS3_attrstat_sz (1+NFS3_fattr_sz)
#define NFS3_wccstat_sz (1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz (NFS3_wccstat_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
@@ -106,7 +108,7 @@ static struct {
* Common NFS XDR functions as inlines
*/
static inline __be32 *
-xdr_encode_fhandle(__be32 *p, struct nfs_fh *fh)
+xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh)
{
return xdr_encode_array(p, fh->data, fh->size);
}
@@ -300,6 +302,18 @@ nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args)
}
/*
+ * Encode REMOVE argument
+ */
+static int
+nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+{
+ p = xdr_encode_fhandle(p, args->fh);
+ p = xdr_encode_array(p, args->name.name, args->name.len);
+ req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ return 0;
+}
+
+/*
* Encode access() argument
*/
static int
@@ -736,6 +750,12 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
return status;
}
+static int
+nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
+{
+ return nfs3_xdr_wccstat(req, p, &res->dir_attr);
+}
+
/*
* Decode LOOKUP reply
*/
@@ -1126,7 +1146,7 @@ struct rpc_procinfo nfs3_procedures[] = {
PROC(MKDIR, mkdirargs, createres, 0),
PROC(SYMLINK, symlinkargs, createres, 0),
PROC(MKNOD, mknodargs, createres, 0),
- PROC(REMOVE, diropargs, wccstat, 0),
+ PROC(REMOVE, removeargs, removeres, 0),
PROC(RMDIR, diropargs, wccstat, 0),
PROC(RENAME, renameargs, renameres, 0),
PROC(LINK, linkargs, linkres, 0),
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6c028e7..d2802b1 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -182,7 +182,7 @@ extern int nfs4_do_close(struct path *path, struct nfs4_state *state);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
-extern int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name,
+extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fee2da8..4b90e17 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -66,6 +66,8 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp);
static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags);
+static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
/* Prevent leaks of NFSv4 errors into userland */
int nfs4_map_errors(int err)
@@ -330,11 +332,9 @@ static int can_open_cached(struct nfs4_state *state, int mode)
switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
- ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
break;
case FMODE_WRITE:
ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
- ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
break;
case FMODE_READ|FMODE_WRITE:
ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
@@ -552,6 +552,18 @@ static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *
return ERR_PTR(-ENOENT);
}
+static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs4_opendata *opendata;
+
+ opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL);
+ if (opendata == NULL)
+ return ERR_PTR(-ENOMEM);
+ opendata->state = state;
+ atomic_inc(&state->count);
+ return opendata;
+}
+
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, struct nfs4_state **res)
{
struct nfs4_state *newstate;
@@ -626,16 +638,15 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
int delegation_type = 0;
int status;
- opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL);
- if (opendata == NULL)
- return -ENOMEM;
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
opendata->o_arg.fh = NFS_FH(state->inode);
- nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh);
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0)
- delegation_type = delegation->flags;
+ delegation_type = delegation->type;
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
@@ -672,13 +683,12 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
- struct nfs4_state_owner *sp = state->owner;
struct nfs4_opendata *opendata;
int ret;
- opendata = nfs4_opendata_alloc(&ctx->path, sp, 0, NULL);
- if (opendata == NULL)
- return -ENOMEM;
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
memcpy(opendata->o_arg.u.delegation.data, stateid->data,
sizeof(opendata->o_arg.u.delegation.data));
@@ -823,8 +833,10 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
/* Update sequence id. */
data->o_arg.id = sp->so_owner_id.id;
data->o_arg.clientid = sp->so_client->cl_clientid;
- if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
+ nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
+ }
data->timestamp = jiffies;
rpc_call_setup(task, &msg, 0);
return;
@@ -918,6 +930,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
if (status != 0 || !data->rpc_done)
return status;
+ if (o_res->fh.size == 0)
+ _nfs4_proc_lookup(dir, o_arg->name, &o_res->fh, o_res->f_attr);
+
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
@@ -929,7 +944,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
- return server->nfs_client->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
+ _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
return 0;
}
@@ -989,9 +1004,9 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
struct nfs4_opendata *opendata;
int ret;
- opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL);
- if (opendata == NULL)
- return -ENOMEM;
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE) {
/* Invalidate the state owner so we don't ever use it again */
@@ -1243,7 +1258,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
- nfs_set_open_stateid(state, &calldata->res.stateid, calldata->arg.open_flags);
+ nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
break;
case -NFS4ERR_STALE_STATEID:
@@ -1269,23 +1284,19 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
.rpc_cred = state->owner->so_cred,
};
int clear_rd, clear_wr, clear_rdwr;
- int mode;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
- mode = FMODE_READ|FMODE_WRITE;
clear_rd = clear_wr = clear_rdwr = 0;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
- mode &= ~FMODE_READ;
clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
}
if (state->n_wronly == 0) {
- mode &= ~FMODE_WRITE;
clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
}
@@ -1297,9 +1308,13 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
return;
}
nfs_fattr_init(calldata->res.fattr);
- if (mode != 0)
+ if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+ calldata->arg.open_flags = FMODE_READ;
+ } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.open_flags = mode;
+ calldata->arg.open_flags = FMODE_WRITE;
+ }
calldata->timestamp = jiffies;
rpc_call_setup(task, &msg, 0);
}
@@ -1419,7 +1434,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
}
res = d_add_unique(dentry, igrab(state->inode));
if (res != NULL)
- dentry = res;
+ path.dentry = res;
nfs4_intent_set_file(nd, &path, state);
return res;
}
@@ -1553,7 +1568,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
* Note that we'll actually follow the referral later when
* we detect fsid mismatch in inode revalidation
*/
-static int nfs4_get_referral(struct inode *dir, struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
+static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
@@ -1668,8 +1683,8 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
return status;
}
-static int _nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
- struct qstr *name, struct nfs_fh *fhandle,
+static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh,
+ const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
int status;
@@ -1715,7 +1730,7 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
return err;
}
-static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
@@ -1908,28 +1923,27 @@ out:
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
- struct nfs4_remove_arg args = {
+ struct nfs_removeargs args = {
.fh = NFS_FH(dir),
- .name = name,
+ .name.len = name->len,
+ .name.name = name->name,
.bitmask = server->attr_bitmask,
};
- struct nfs_fattr dir_attr;
- struct nfs4_remove_res res = {
+ struct nfs_removeres res = {
.server = server,
- .dir_attr = &dir_attr,
};
struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
- .rpc_argp = &args,
- .rpc_resp = &res,
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
};
int status;
- nfs_fattr_init(res.dir_attr);
+ nfs_fattr_init(&res.dir_attr);
status = rpc_call_sync(server->client, &msg, 0);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_post_op_update_inode(dir, &res.dir_attr);
}
return status;
}
@@ -1946,48 +1960,26 @@ static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
return err;
}
-struct unlink_desc {
- struct nfs4_remove_arg args;
- struct nfs4_remove_res res;
- struct nfs_fattr dir_attr;
-};
-
-static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
- struct qstr *name)
+static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
- struct nfs_server *server = NFS_SERVER(dir->d_inode);
- struct unlink_desc *up;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_removeargs *args = msg->rpc_argp;
+ struct nfs_removeres *res = msg->rpc_resp;
- up = kmalloc(sizeof(*up), GFP_KERNEL);
- if (!up)
- return -ENOMEM;
-
- up->args.fh = NFS_FH(dir->d_inode);
- up->args.name = name;
- up->args.bitmask = server->attr_bitmask;
- up->res.server = server;
- up->res.dir_attr = &up->dir_attr;
-
+ args->bitmask = server->attr_bitmask;
+ res->server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
- msg->rpc_argp = &up->args;
- msg->rpc_resp = &up->res;
- return 0;
}
-static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
- struct rpc_message *msg = &task->tk_msg;
- struct unlink_desc *up;
-
- if (msg->rpc_resp != NULL) {
- up = container_of(msg->rpc_resp, struct unlink_desc, res);
- update_changeattr(dir->d_inode, &up->res.cinfo);
- nfs_post_op_update_inode(dir->d_inode, up->res.dir_attr);
- kfree(up);
- msg->rpc_resp = NULL;
- msg->rpc_argp = NULL;
- }
- return 0;
+ struct nfs_removeres *res = task->tk_msg.rpc_resp;
+
+ if (nfs4_async_handle_error(task, res->server) == -EAGAIN)
+ return 0;
+ update_changeattr(dir, &res->cinfo);
+ nfs_post_op_update_inode(dir, &res->dir_attr);
+ return 1;
}
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
@@ -3672,7 +3664,7 @@ ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
return len;
}
-int nfs4_proc_fs_locations(struct inode *dir, struct qstr *name,
+int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0505ca1..3ea352d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -127,16 +127,15 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
void
nfs4_renewd_prepare_shutdown(struct nfs_server *server)
{
- flush_scheduled_work();
+ cancel_delayed_work(&server->nfs_client->cl_renewd);
}
void
nfs4_kill_renewd(struct nfs_client *clp)
{
down_read(&clp->cl_sem);
- cancel_delayed_work(&clp->cl_renewd);
+ cancel_delayed_work_sync(&clp->cl_renewd);
up_read(&clp->cl_sem);
- flush_scheduled_work();
}
/*
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e9662ba..3e4adf8 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -341,8 +341,6 @@ nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
else
list_move_tail(&state->open_states, &state->owner->so_states);
}
- if (mode == 0)
- list_del_init(&state->inode_states);
state->state = mode;
}
@@ -415,8 +413,7 @@ void nfs4_put_open_state(struct nfs4_state *state)
if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
return;
spin_lock(&inode->i_lock);
- if (!list_empty(&state->inode_states))
- list_del(&state->inode_states);
+ list_del(&state->inode_states);
list_del(&state->open_states);
spin_unlock(&inode->i_lock);
spin_unlock(&owner->so_lock);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c087384..badd73b 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -72,10 +72,15 @@ static int nfs4_stat_to_errno(int);
*/
#define open_owner_id_maxsz (1 + 4)
#define lock_owner_id_maxsz (1 + 4)
+#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
#define op_decode_hdr_maxsz (2)
+#define encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define decode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define encode_putfh_maxsz (op_encode_hdr_maxsz + 1 + \
(NFS4_FHSIZE >> 2))
#define decode_putfh_maxsz (op_decode_hdr_maxsz)
@@ -96,6 +101,11 @@ static int nfs4_stat_to_errno(int);
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
+#define encode_attrs_maxsz (nfs4_fattr_bitmap_maxsz + \
+ 1 + 2 + 1 + \
+ nfs4_owner_maxsz + \
+ nfs4_group_maxsz + \
+ 4 + 4)
#define encode_savefh_maxsz (op_encode_hdr_maxsz)
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
@@ -123,7 +133,7 @@ static int nfs4_stat_to_errno(int);
#define decode_lookup_maxsz (op_decode_hdr_maxsz)
#define encode_share_access_maxsz \
(2)
-#define encode_createmode_maxsz (1 + nfs4_fattr_maxsz)
+#define encode_createmode_maxsz (1 + encode_attrs_maxsz)
#define encode_opentype_maxsz (1 + encode_createmode_maxsz)
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
@@ -132,14 +142,52 @@ static int nfs4_stat_to_errno(int);
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_ace_maxsz (3 + nfs4_owner_maxsz)
-#define decode_delegation_maxsz (1 + XDR_QUADLEN(NFS4_STATEID_SIZE) + 1 + \
+#define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \
decode_ace_maxsz)
#define decode_change_info_maxsz (5)
#define decode_open_maxsz (op_decode_hdr_maxsz + \
- XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+ decode_stateid_maxsz + \
decode_change_info_maxsz + 1 + \
nfs4_fattr_bitmap_maxsz + \
decode_delegation_maxsz)
+#define encode_open_confirm_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 1)
+#define decode_open_confirm_maxsz \
+ (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_open_downgrade_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 1 + \
+ encode_share_access_maxsz)
+#define decode_open_downgrade_maxsz \
+ (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_close_maxsz (op_encode_hdr_maxsz + \
+ 1 + encode_stateid_maxsz)
+#define decode_close_maxsz (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_setattr_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + \
+ encode_attrs_maxsz)
+#define decode_setattr_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz)
+#define encode_read_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 3)
+#define decode_read_maxsz (op_decode_hdr_maxsz + 2)
+#define encode_readdir_maxsz (op_encode_hdr_maxsz + \
+ 2 + encode_verifier_maxsz + 5)
+#define decode_readdir_maxsz (op_decode_hdr_maxsz + \
+ decode_verifier_maxsz)
+#define encode_readlink_maxsz (op_encode_hdr_maxsz)
+#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
+#define encode_write_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 4)
+#define decode_write_maxsz (op_decode_hdr_maxsz + \
+ 2 + decode_verifier_maxsz)
+#define encode_commit_maxsz (op_encode_hdr_maxsz + 3)
+#define decode_commit_maxsz (op_decode_hdr_maxsz + \
+ decode_verifier_maxsz)
#define encode_remove_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define encode_rename_maxsz (op_encode_hdr_maxsz + \
@@ -148,19 +196,44 @@ static int nfs4_stat_to_errno(int);
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + 5)
+#define encode_lock_maxsz (op_encode_hdr_maxsz + \
+ 7 + \
+ 1 + encode_stateid_maxsz + 8)
+#define decode_lock_denied_maxsz \
+ (8 + decode_lockowner_maxsz)
+#define decode_lock_maxsz (op_decode_hdr_maxsz + \
+ decode_lock_denied_maxsz)
+#define encode_lockt_maxsz (op_encode_hdr_maxsz + 12)
+#define decode_lockt_maxsz (op_decode_hdr_maxsz + \
+ decode_lock_denied_maxsz)
+#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \
+ encode_stateid_maxsz + \
+ 4)
+#define decode_locku_maxsz (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_access_maxsz (op_encode_hdr_maxsz + 1)
+#define decode_access_maxsz (op_decode_hdr_maxsz + 2)
#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
1 + nfs4_name_maxsz + \
1 + \
nfs4_fattr_maxsz)
#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8)
#define encode_create_maxsz (op_encode_hdr_maxsz + \
- 2 + nfs4_name_maxsz + \
- nfs4_fattr_maxsz)
+ 1 + 2 + nfs4_name_maxsz + \
+ encode_attrs_maxsz)
#define decode_create_maxsz (op_decode_hdr_maxsz + \
decode_change_info_maxsz + \
nfs4_fattr_bitmap_maxsz)
+#define encode_statfs_maxsz (encode_getattr_maxsz)
+#define decode_statfs_maxsz (decode_getattr_maxsz)
#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
+#define encode_getacl_maxsz (encode_getattr_maxsz)
+#define decode_getacl_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz + 1)
+#define encode_setacl_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 3)
+#define decode_setacl_maxsz (decode_setattr_maxsz)
#define encode_fs_locations_maxsz \
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
@@ -169,37 +242,37 @@ static int nfs4_stat_to_errno(int);
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 7)
+ encode_read_maxsz)
#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 2)
+ decode_read_maxsz)
#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz)
+ encode_readlink_maxsz)
#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz)
+ decode_readlink_maxsz)
#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 9)
+ encode_readdir_maxsz)
#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 2)
+ decode_readdir_maxsz)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 8 + \
+ encode_write_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4 + \
+ decode_write_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 3 + \
+ encode_commit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 2 + \
+ decode_commit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
@@ -217,13 +290,14 @@ static int nfs4_stat_to_errno(int);
decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
-#define NFS4_enc_open_confirm_sz \
- (compound_encode_hdr_maxsz + \
- encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 5)
-#define NFS4_dec_open_confirm_sz (compound_decode_hdr_maxsz + \
- decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4)
+#define NFS4_enc_open_confirm_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_open_confirm_maxsz)
+#define NFS4_dec_open_confirm_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_open_confirm_maxsz)
#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
@@ -234,31 +308,30 @@ static int nfs4_stat_to_errno(int);
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
- encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 7 + \
- encode_getattr_maxsz)
+ encode_putfh_maxsz + \
+ encode_open_downgrade_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
- decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4 + \
- decode_getattr_maxsz)
-#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
- encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 5 + \
- encode_getattr_maxsz)
-#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
- decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4 + \
- decode_getattr_maxsz)
-#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
- encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 4 + \
- nfs4_fattr_maxsz + \
- encode_getattr_maxsz)
-#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
- decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 3 + \
- nfs4_fattr_maxsz)
+ decode_putfh_maxsz + \
+ decode_open_downgrade_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_close_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_close_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_setattr_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_setattr_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_fsinfo_maxsz)
@@ -285,39 +358,28 @@ static int nfs4_stat_to_errno(int);
decode_fsinfo_maxsz)
#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz + \
- op_encode_hdr_maxsz + \
- 1 + 1 + 2 + 2 + \
- 1 + 4 + 1 + 2 + \
- lock_owner_id_maxsz)
+ encode_lock_maxsz)
#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- decode_getattr_maxsz + \
- op_decode_hdr_maxsz + \
- 2 + 2 + 1 + 2 + \
- lock_owner_id_maxsz)
+ decode_lock_maxsz)
#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz + \
- op_encode_hdr_maxsz + \
- 1 + 2 + 2 + 2 + \
- lock_owner_id_maxsz)
-#define NFS4_dec_lockt_sz (NFS4_dec_lock_sz)
+ encode_lockt_maxsz)
+#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lockt_maxsz)
#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz + \
- op_encode_hdr_maxsz + \
- 1 + 1 + 4 + 2 + 2)
+ encode_locku_maxsz)
#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- decode_getattr_maxsz + \
- op_decode_hdr_maxsz + 4)
+ decode_locku_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 1)
+ encode_access_maxsz)
#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 2)
+ decode_access_maxsz)
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
@@ -416,10 +478,10 @@ static int nfs4_stat_to_errno(int);
decode_getattr_maxsz)
#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz)
+ encode_statfs_maxsz)
#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 12)
+ decode_statfs_maxsz)
#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
@@ -435,18 +497,16 @@ static int nfs4_stat_to_errno(int);
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_getattr_maxsz)
+ encode_getacl_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + \
- nfs4_fattr_bitmap_maxsz + 1)
+ decode_getacl_maxsz)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 4 + \
- nfs4_fattr_bitmap_maxsz + 1)
+ encode_setacl_maxsz)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
+ decode_setacl_maxsz)
#define NFS4_enc_fs_locations_sz \
(compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
@@ -1108,12 +1168,10 @@ static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args)
static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
uint32_t attrs[2] = {
FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID,
FATTR4_WORD1_MOUNTED_ON_FILEID,
};
- int replen;
__be32 *p;
RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
@@ -1138,37 +1196,16 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
attrs[0] & readdir->bitmask[0],
attrs[1] & readdir->bitmask[1]);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READDIR + status + verifer(2) = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + 9) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->pages,
- readdir->pgbase, readdir->count);
- dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __FUNCTION__, replen, readdir->pages,
- readdir->pgbase, readdir->count);
-
return 0;
}
static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- unsigned int replen;
__be32 *p;
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READLINK + status + string length = 8
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + 8) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, readlink->pages,
- readlink->pgbase, readlink->pglen);
-
return 0;
}
@@ -1398,7 +1435,7 @@ out:
/*
* Encode REMOVE request
*/
-static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs4_remove_arg *args)
+static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -1410,7 +1447,7 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs
encode_compound_hdr(&xdr, &hdr);
if ((status = encode_putfh(&xdr, args->fh)) != 0)
goto out;
- if ((status = encode_remove(&xdr, args->name)) != 0)
+ if ((status = encode_remove(&xdr, &args->name)) != 0)
goto out;
status = encode_getfattr(&xdr, args->bitmask);
out:
@@ -1734,6 +1771,8 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n
struct compound_hdr hdr = {
.nops = 2,
};
+ struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ unsigned int replen;
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -1742,6 +1781,15 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n
if(status)
goto out;
status = encode_readlink(&xdr, args, req);
+
+ /* set up reply kvec
+ * toplevel_status + taglen + rescount + OP_PUTFH + status
+ * + OP_READLINK + status + string length = 8
+ */
+ replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readlink_sz) << 2;
+ xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ args->pgbase, args->pglen);
+
out:
return status;
}
@@ -1755,6 +1803,8 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
struct compound_hdr hdr = {
.nops = 2,
};
+ struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ int replen;
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -1763,6 +1813,18 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
if(status)
goto out;
status = encode_readdir(&xdr, args, req);
+
+ /* set up reply kvec
+ * toplevel_status + taglen + rescount + OP_PUTFH + status
+ * + OP_READDIR + status + verifier(2) = 9
+ */
+ replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readdir_sz) << 2;
+ xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ args->pgbase, args->count);
+ dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
+ __FUNCTION__, replen, args->pages,
+ args->pgbase, args->count);
+
out:
return status;
}
@@ -3161,11 +3223,12 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
uint32_t len;
int status;
+ /* Zero handle first to allow comparisons */
+ memset(fh, 0, sizeof(*fh));
+
status = decode_op_hdr(xdr, OP_GETFH);
if (status)
return status;
- /* Zero handle first to allow comparisons */
- memset(fh, 0, sizeof(*fh));
READ_BUF(4);
READ32(len);
@@ -3772,7 +3835,7 @@ out:
/*
* Decode REMOVE response
*/
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_remove_res *res)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -3785,7 +3848,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_attr, res->server);
+ decode_getfattr(&xdr, &res->dir_attr, res->server);
out:
return status;
}
@@ -4030,12 +4093,11 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr
status = decode_open(&xdr, res);
if (status)
goto out;
- status = decode_getfh(&xdr, &res->fh);
- if (status)
+ if (decode_getfh(&xdr, &res->fh) != 0)
goto out;
if (decode_getfattr(&xdr, res->f_attr, res->server) != 0)
goto out;
- if ((status = decode_restorefh(&xdr)) != 0)
+ if (decode_restorefh(&xdr) != 0)
goto out;
decode_getfattr(&xdr, res->dir_attr, res->server);
out:
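The maxsz churn above is pure bookkeeping: every encode_*/decode_* macro counts 32-bit XDR words, XDR_QUADLEN() (assuming its usual round-up-to-words definition) converts opaque byte lengths such as the 16-byte stateid into word counts, and the per-operation sizes are summed into the NFS4_enc_*/NFS4_dec_* compound totals. Those decode totals are also what the readlink/readdir encoders now feed into the reply-buffer offset passed to xdr_inline_pages(), replacing the old hand-counted word constants. A small sketch of the arithmetic, with the constants copied from the hunk and NFS4_STATEID_SIZE assumed to be 16 bytes:

/*
 * The sizes are in 32-bit XDR words; multiply by 4 for bytes.
 * XDR_QUADLEN and NFS4_STATEID_SIZE mimic their assumed kernel values.
 */
#include <stdio.h>

#define XDR_QUADLEN(l)		(((l) + 3) >> 2)
#define NFS4_STATEID_SIZE	16

#define op_encode_hdr_maxsz	(1)
#define encode_stateid_maxsz	(XDR_QUADLEN(NFS4_STATEID_SIZE))
#define encode_close_maxsz	(op_encode_hdr_maxsz + 1 + encode_stateid_maxsz)

int main(void)
{
	/* 1 op-header word + 1 seqid word + 4 stateid words = 6 words */
	printf("encode_close_maxsz = %d words (%d bytes)\n",
	       encode_close_maxsz, encode_close_maxsz * 4);
	return 0;
}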
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index f56dae5..345bb9b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -442,7 +442,7 @@ int __init nfs_init_nfspagecache(void)
nfs_page_cachep = kmem_cache_create("nfs_page",
sizeof(struct nfs_page),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_page_cachep == NULL)
return -ENOMEM;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 7be0ee2..845cdde 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -272,14 +272,14 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
static int
nfs_proc_remove(struct inode *dir, struct qstr *name)
{
- struct nfs_diropargs arg = {
- .fh = NFS_FH(dir),
- .name = name->name,
- .len = name->len
+ struct nfs_removeargs arg = {
+ .fh = NFS_FH(dir),
+ .name.len = name->len,
+ .name.name = name->name,
};
- struct rpc_message msg = {
- .rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
- .rpc_argp = &arg,
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
+ .rpc_argp = &arg,
};
int status;
@@ -291,32 +291,16 @@ nfs_proc_remove(struct inode *dir, struct qstr *name)
return status;
}
-static int
-nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+static void
+nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
- struct nfs_diropargs *arg;
-
- arg = kmalloc(sizeof(*arg), GFP_KERNEL);
- if (!arg)
- return -ENOMEM;
- arg->fh = NFS_FH(dir->d_inode);
- arg->name = name->name;
- arg->len = name->len;
msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
- msg->rpc_argp = arg;
- return 0;
}
-static int
-nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
- struct rpc_message *msg = &task->tk_msg;
-
- if (msg->rpc_argp) {
- nfs_mark_for_revalidate(dir->d_inode);
- kfree(msg->rpc_argp);
- }
- return 0;
+ nfs_mark_for_revalidate(dir);
+ return 1;
}
static int
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6ae2e58..19e0563 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -598,7 +598,7 @@ int __init nfs_init_readpagecache(void)
nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
sizeof(struct nfs_read_data),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index adffe16..b878528 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -345,8 +345,8 @@ void __exit unregister_nfs_fs(void)
unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
- nfs_unregister_sysctl();
#endif
+ nfs_unregister_sysctl();
unregister_filesystem(&nfs_fs_type);
}
@@ -732,7 +732,7 @@ static int nfs_parse_mount_options(char *raw,
return 0;
if (option < 0 || option > 65535)
return 0;
- mnt->nfs_server.address.sin_port = htonl(option);
+ mnt->nfs_server.address.sin_port = htons(option);
break;
case Opt_rsize:
if (match_int(args, &mnt->rsize))
@@ -911,13 +911,13 @@ static int nfs_parse_mount_options(char *raw,
kfree(string);
switch (token) {
- case Opt_udp:
+ case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = IPPROTO_UDP;
mnt->timeo = 7;
mnt->retrans = 5;
break;
- case Opt_tcp:
+ case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = IPPROTO_TCP;
mnt->timeo = 600;
@@ -936,10 +936,10 @@ static int nfs_parse_mount_options(char *raw,
kfree(string);
switch (token) {
- case Opt_udp:
+ case Opt_xprt_udp:
mnt->mount_server.protocol = IPPROTO_UDP;
break;
- case Opt_tcp:
+ case Opt_xprt_tcp:
mnt->mount_server.protocol = IPPROTO_TCP;
break;
default:
@@ -1153,20 +1153,20 @@ static int nfs_validate_mount_data(struct nfs_mount_data **options,
c = strchr(dev_name, ':');
if (c == NULL)
return -EINVAL;
- len = c - dev_name - 1;
+ len = c - dev_name;
if (len > sizeof(data->hostname))
- return -EINVAL;
+ return -ENAMETOOLONG;
strncpy(data->hostname, dev_name, len);
args.nfs_server.hostname = data->hostname;
c++;
if (strlen(c) > NFS_MAXPATHLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
args.nfs_server.export_path = c;
status = nfs_try_mount(&args, mntfh);
if (status)
- return -EINVAL;
+ return status;
/*
* Translate to nfs_mount_data, which nfs_fill_super
@@ -1303,34 +1303,6 @@ static void nfs_clone_super(struct super_block *sb,
nfs_initialise_sb(sb);
}
-static int nfs_set_super(struct super_block *s, void *_server)
-{
- struct nfs_server *server = _server;
- int ret;
-
- s->s_fs_info = server;
- ret = set_anon_super(s, server);
- if (ret == 0)
- server->s_dev = s->s_dev;
- return ret;
-}
-
-static int nfs_compare_super(struct super_block *sb, void *data)
-{
- struct nfs_server *server = data, *old = NFS_SB(sb);
-
- if (memcmp(&old->nfs_client->cl_addr,
- &server->nfs_client->cl_addr,
- sizeof(old->nfs_client->cl_addr)) != 0)
- return 0;
- /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
- if (old->flags & NFS_MOUNT_UNSHARED)
- return 0;
- if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0)
- return 0;
- return 1;
-}
-
#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
@@ -1359,9 +1331,46 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
goto Ebusy;
if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
goto Ebusy;
- return 0;
+ return 1;
Ebusy:
- return -EBUSY;
+ return 0;
+}
+
+struct nfs_sb_mountdata {
+ struct nfs_server *server;
+ int mntflags;
+};
+
+static int nfs_set_super(struct super_block *s, void *data)
+{
+ struct nfs_sb_mountdata *sb_mntdata = data;
+ struct nfs_server *server = sb_mntdata->server;
+ int ret;
+
+ s->s_flags = sb_mntdata->mntflags;
+ s->s_fs_info = server;
+ ret = set_anon_super(s, server);
+ if (ret == 0)
+ server->s_dev = s->s_dev;
+ return ret;
+}
+
+static int nfs_compare_super(struct super_block *sb, void *data)
+{
+ struct nfs_sb_mountdata *sb_mntdata = data;
+ struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
+ int mntflags = sb_mntdata->mntflags;
+
+ if (memcmp(&old->nfs_client->cl_addr,
+ &server->nfs_client->cl_addr,
+ sizeof(old->nfs_client->cl_addr)) != 0)
+ return 0;
+ /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
+ if (old->flags & NFS_MOUNT_UNSHARED)
+ return 0;
+ if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0)
+ return 0;
+ return nfs_compare_mount_options(sb, server, mntflags);
}
static int nfs_get_sb(struct file_system_type *fs_type,
@@ -1373,6 +1382,9 @@ static int nfs_get_sb(struct file_system_type *fs_type,
struct nfs_mount_data *data = raw_data;
struct dentry *mntroot;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
int error;
/* Validate the mount data */
@@ -1386,28 +1398,25 @@ static int nfs_get_sb(struct file_system_type *fs_type,
error = PTR_ERR(server);
goto out;
}
+ sb_mntdata.server = server;
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, server);
+ s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_err_nosb;
}
if (s->s_fs_info != server) {
- error = nfs_compare_mount_options(s, server, flags);
nfs_free_server(server);
server = NULL;
- if (error < 0)
- goto error_splat_super;
}
if (!s->s_root) {
/* initial superblock/root creation */
- s->s_flags = flags;
nfs_fill_super(s, data);
}
@@ -1460,6 +1469,9 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
struct nfs_server *server;
struct dentry *mntroot;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
int error;
dprintk("--> nfs_xdev_get_sb()\n");
@@ -1470,28 +1482,25 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
error = PTR_ERR(server);
goto out_err_noserver;
}
+ sb_mntdata.server = server;
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs_fs_type, compare_super, nfs_set_super, server);
+ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_err_nosb;
}
if (s->s_fs_info != server) {
- error = nfs_compare_mount_options(s, server, flags);
nfs_free_server(server);
server = NULL;
- if (error < 0)
- goto error_splat_super;
}
if (!s->s_root) {
/* initial superblock/root creation */
- s->s_flags = flags;
nfs_clone_super(s, data->sb);
}
@@ -1668,7 +1677,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options,
/* while calculating len, pretend ':' is '\0' */
len = c - dev_name;
if (len > NFS4_MAXNAMLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
*hostname = kzalloc(len, GFP_KERNEL);
if (*hostname == NULL)
return -ENOMEM;
@@ -1677,7 +1686,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options,
c++; /* step over the ':' */
len = strlen(c);
if (len > NFS4_MAXPATHLEN)
- return -EINVAL;
+ return -ENAMETOOLONG;
*mntpath = kzalloc(len + 1, GFP_KERNEL);
if (*mntpath == NULL)
return -ENOMEM;
@@ -1685,6 +1694,9 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options,
dprintk("MNTPATH: %s\n", *mntpath);
+ if (args.client_address == NULL)
+ goto out_no_client_address;
+
*ip_addr = args.client_address;
break;
@@ -1705,6 +1717,10 @@ out_inval_auth:
out_no_address:
dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
return -EINVAL;
+
+out_no_client_address:
+ dfprintk(MOUNT, "NFS4: mount program didn't pass callback address\n");
+ return -EINVAL;
}
/*
@@ -1722,6 +1738,9 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
struct dentry *mntroot;
char *mntpath = NULL, *hostname = NULL, *ip_addr = NULL;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
int error;
/* Validate the mount data */
@@ -1737,12 +1756,13 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
error = PTR_ERR(server);
goto out;
}
+ sb_mntdata.server = server;
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, server);
+ s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_free;
@@ -1755,7 +1775,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
- s->s_flags = flags;
nfs4_fill_super(s);
}
@@ -1809,6 +1828,9 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
struct nfs_server *server;
struct dentry *mntroot;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
int error;
dprintk("--> nfs4_xdev_get_sb()\n");
@@ -1819,12 +1841,13 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
error = PTR_ERR(server);
goto out_err_noserver;
}
+ sb_mntdata.server = server;
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs_fs_type, compare_super, nfs_set_super, server);
+ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_err_nosb;
@@ -1837,7 +1860,6 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
if (!s->s_root) {
/* initial superblock/root creation */
- s->s_flags = flags;
nfs4_clone_super(s, data->sb);
}
@@ -1880,6 +1902,9 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
struct dentry *mntroot;
struct nfs_fh mntfh;
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
int error;
dprintk("--> nfs4_referral_get_sb()\n");
@@ -1890,12 +1915,13 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
error = PTR_ERR(server);
goto out_err_noserver;
}
+ sb_mntdata.server = server;
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs_fs_type, compare_super, nfs_set_super, server);
+ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_err_nosb;
@@ -1908,7 +1934,6 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
if (!s->s_root) {
/* initial superblock/root creation */
- s->s_flags = flags;
nfs4_fill_super(s);
}
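The super.c hunks above replace the bare nfs_server pointer handed to sget() with a struct nfs_sb_mountdata carrying both the server and the mount flags. nfs_set_super() now applies s_flags itself, so the explicit s->s_flags = flags assignments elsewhere can be dropped, and nfs_compare_super() rejects an existing superblock whose mount options conflict by calling nfs_compare_mount_options(), whose return convention flips to 1 on match and 0 on mismatch. A hedged, user-space sketch of the pattern, one context struct shared by the compare and set callbacks, with made-up types and a toy find_or_create() in place of sget():

#include <stdio.h>
#include <stddef.h>

struct toy_sb { int in_use; int flags; int server_id; };
struct toy_mountdata { int flags; int server_id; };

static struct toy_sb table[4];

/* Both callbacks see the same context, as nfs_sb_mountdata provides. */
static int compare(const struct toy_sb *s, const struct toy_mountdata *d)
{
	return s->server_id == d->server_id && s->flags == d->flags;
}

static void set(struct toy_sb *s, const struct toy_mountdata *d)
{
	s->flags = d->flags;		/* what nfs_set_super() now does */
	s->server_id = d->server_id;
	s->in_use = 1;
}

static struct toy_sb *find_or_create(const struct toy_mountdata *d)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].in_use && compare(&table[i], d))
			return &table[i];	/* share the existing sb */
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!table[i].in_use) {
			set(&table[i], d);
			return &table[i];	/* created a new sb */
		}
	return NULL;
}

int main(void)
{
	struct toy_mountdata a = { .flags = 1, .server_id = 7 };
	struct toy_mountdata b = { .flags = 2, .server_id = 7 };

	printf("shared? %s\n",
	       find_or_create(&a) == find_or_create(&b) ? "yes" : "no");
	return 0;
}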
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 0e28189..045ab80 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -3,7 +3,6 @@
*
* nfs sillydelete handling
*
- * NOTE: we rely on holding the BKL for list manipulation protection.
*/
#include <linux/slab.h>
@@ -15,46 +14,23 @@
struct nfs_unlinkdata {
- struct nfs_unlinkdata *next;
- struct dentry *dir, *dentry;
- struct qstr name;
- struct rpc_task task;
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct inode *dir;
struct rpc_cred *cred;
- unsigned int count;
};
-static struct nfs_unlinkdata *nfs_deletes;
-static RPC_WAITQ(nfs_delete_queue, "nfs_delete_queue");
-
-/**
- * nfs_detach_unlinkdata - Remove asynchronous unlink from global list
- * @data: pointer to descriptor
- */
-static inline void
-nfs_detach_unlinkdata(struct nfs_unlinkdata *data)
-{
- struct nfs_unlinkdata **q;
-
- for (q = &nfs_deletes; *q != NULL; q = &((*q)->next)) {
- if (*q == data) {
- *q = data->next;
- break;
- }
- }
-}
-
/**
- * nfs_put_unlinkdata - release data from a sillydelete operation.
+ * nfs_free_unlinkdata - release data from a sillydelete operation.
* @data: pointer to unlink structure.
*/
static void
-nfs_put_unlinkdata(struct nfs_unlinkdata *data)
+nfs_free_unlinkdata(struct nfs_unlinkdata *data)
{
- if (--data->count == 0) {
- nfs_detach_unlinkdata(data);
- kfree(data->name.name);
- kfree(data);
- }
+ iput(data->dir);
+ put_rpccred(data->cred);
+ kfree(data->args.name.name);
+ kfree(data);
}
#define NAME_ALLOC_LEN(len) ((len+16) & ~15)
@@ -63,50 +39,36 @@ nfs_put_unlinkdata(struct nfs_unlinkdata *data)
* @dentry: pointer to dentry
* @data: nfs_unlinkdata
*/
-static inline void
-nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
+static int nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
{
char *str;
int len = dentry->d_name.len;
- str = kmalloc(NAME_ALLOC_LEN(len), GFP_KERNEL);
+ str = kmemdup(dentry->d_name.name, NAME_ALLOC_LEN(len), GFP_KERNEL);
if (!str)
- return;
- memcpy(str, dentry->d_name.name, len);
- if (!data->name.len) {
- data->name.len = len;
- data->name.name = str;
- } else
- kfree(str);
+ return -ENOMEM;
+ data->args.name.len = len;
+ data->args.name.name = str;
+ return 0;
}
/**
* nfs_async_unlink_init - Initialize the RPC info
- * @task: rpc_task of the sillydelete
- *
- * We delay initializing RPC info until after the call to dentry_iput()
- * in order to minimize races against rename().
+ * task: rpc_task of the sillydelete
*/
static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
{
- struct nfs_unlinkdata *data = calldata;
- struct dentry *dir = data->dir;
- struct rpc_message msg = {
- .rpc_cred = data->cred,
+ struct nfs_unlinkdata *data = calldata;
+ struct inode *dir = data->dir;
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
};
- int status = -ENOENT;
-
- if (!data->name.len)
- goto out_err;
- status = NFS_PROTO(dir->d_inode)->unlink_setup(&msg, dir, &data->name);
- if (status < 0)
- goto out_err;
- nfs_begin_data_update(dir->d_inode);
+ nfs_begin_data_update(dir);
+ NFS_PROTO(dir)->unlink_setup(&msg, dir);
rpc_call_setup(task, &msg, 0);
- return;
- out_err:
- rpc_exit(task, status);
}
/**
@@ -117,19 +79,13 @@ static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
*/
static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
{
- struct nfs_unlinkdata *data = calldata;
- struct dentry *dir = data->dir;
- struct inode *dir_i;
-
- if (!dir)
- return;
- dir_i = dir->d_inode;
- nfs_end_data_update(dir_i);
- if (NFS_PROTO(dir_i)->unlink_done(dir, task))
- return;
- put_rpccred(data->cred);
- data->cred = NULL;
- dput(dir);
+ struct nfs_unlinkdata *data = calldata;
+ struct inode *dir = data->dir;
+
+ if (!NFS_PROTO(dir)->unlink_done(task, dir))
+ rpc_restart_call(task);
+ else
+ nfs_end_data_update(dir);
}
/**
@@ -142,7 +98,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
static void nfs_async_unlink_release(void *calldata)
{
struct nfs_unlinkdata *data = calldata;
- nfs_put_unlinkdata(data);
+ nfs_free_unlinkdata(data);
}
static const struct rpc_call_ops nfs_unlink_ops = {
@@ -151,73 +107,94 @@ static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_release = nfs_async_unlink_release,
};
+static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
+{
+ struct rpc_task *task;
+ struct dentry *parent;
+ struct inode *dir;
+
+ if (nfs_copy_dname(dentry, data) < 0)
+ goto out_free;
+
+ parent = dget_parent(dentry);
+ if (parent == NULL)
+ goto out_free;
+ dir = igrab(parent->d_inode);
+ dput(parent);
+ if (dir == NULL)
+ goto out_free;
+
+ data->dir = dir;
+ data->args.fh = NFS_FH(dir);
+ nfs_fattr_init(&data->res.dir_attr);
+
+ task = rpc_run_task(NFS_CLIENT(dir), RPC_TASK_ASYNC, &nfs_unlink_ops, data);
+ if (!IS_ERR(task))
+ rpc_put_task(task);
+ return 1;
+out_free:
+ return 0;
+}
+
/**
* nfs_async_unlink - asynchronous unlinking of a file
+ * @dir: parent directory of dentry
* @dentry: dentry to unlink
*/
int
-nfs_async_unlink(struct dentry *dentry)
+nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
- struct dentry *dir = dentry->d_parent;
- struct nfs_unlinkdata *data;
- struct rpc_clnt *clnt = NFS_CLIENT(dir->d_inode);
- int status = -ENOMEM;
+ struct nfs_unlinkdata *data;
+ int status = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (data == NULL)
goto out;
- data->cred = rpcauth_lookupcred(clnt->cl_auth, 0);
+ data->cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
if (IS_ERR(data->cred)) {
status = PTR_ERR(data->cred);
goto out_free;
}
- data->dir = dget(dir);
- data->dentry = dentry;
-
- data->next = nfs_deletes;
- nfs_deletes = data;
- data->count = 1;
-
- rpc_init_task(&data->task, clnt, RPC_TASK_ASYNC, &nfs_unlink_ops, data);
+ status = -EBUSY;
spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto out_unlock;
dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+ dentry->d_fsdata = data;
spin_unlock(&dentry->d_lock);
-
- rpc_sleep_on(&nfs_delete_queue, &data->task, NULL, NULL);
- status = 0;
- out:
- return status;
+ return 0;
+out_unlock:
+ spin_unlock(&dentry->d_lock);
+ put_rpccred(data->cred);
out_free:
kfree(data);
+out:
return status;
}
/**
* nfs_complete_unlink - Initialize completion of the sillydelete
* @dentry: dentry to delete
+ * @inode: inode
*
* Since we're most likely to be called by dentry_iput(), we
* only use the dentry to find the sillydelete. We then copy the name
* into the qstr.
*/
void
-nfs_complete_unlink(struct dentry *dentry)
+nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
{
- struct nfs_unlinkdata *data;
+ struct nfs_unlinkdata *data = NULL;
- for(data = nfs_deletes; data != NULL; data = data->next) {
- if (dentry == data->dentry)
- break;
- }
- if (!data)
- return;
- data->count++;
- nfs_copy_dname(dentry, data);
spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ data = dentry->d_fsdata;
+ }
spin_unlock(&dentry->d_lock);
- rpc_wake_up_task(&data->task);
- nfs_put_unlinkdata(data);
+
+ if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
+ nfs_free_unlinkdata(data);
}
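The unlink.c rewrite above drops the global nfs_deletes list and the BKL it depended on: nfs_async_unlink() stores the nfs_unlinkdata in dentry->d_fsdata and sets DCACHE_NFSFS_RENAMED under d_lock, nfs_complete_unlink() later claims the data back under the same lock by clearing the flag, and only then does nfs_call_unlink() fire the RPC via rpc_run_task(). A minimal sketch of that claim-under-lock handoff, with pthreads standing in for d_lock and every name below a stand-in rather than a kernel definition:

#include <pthread.h>
#include <stdio.h>

#define RENAMED 0x1

struct toy_dentry {
	pthread_mutex_t d_lock;
	unsigned int d_flags;
	void *d_fsdata;
};

/* Publish unlink data exactly once; -1 loosely models the -EBUSY case. */
static int publish(struct toy_dentry *d, void *data)
{
	int ret = -1;

	pthread_mutex_lock(&d->d_lock);
	if (!(d->d_flags & RENAMED)) {
		d->d_flags |= RENAMED;
		d->d_fsdata = data;
		ret = 0;
	}
	pthread_mutex_unlock(&d->d_lock);
	return ret;
}

/* Claim ownership back, as nfs_complete_unlink() does before the RPC. */
static void *claim(struct toy_dentry *d)
{
	void *data = NULL;

	pthread_mutex_lock(&d->d_lock);
	if (d->d_flags & RENAMED) {
		d->d_flags &= ~RENAMED;
		data = d->d_fsdata;
	}
	pthread_mutex_unlock(&d->d_lock);
	return data;
}

int main(void)
{
	struct toy_dentry d = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	int payload = 42;

	if (publish(&d, &payload) == 0)
		printf("claimed %d\n", *(int *)claim(&d));
	return 0;
}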
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 73ac992..0d7a77c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1396,6 +1396,50 @@ out:
return ret;
}
+int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+{
+ struct nfs_page *req;
+ loff_t range_start = page_offset(page);
+ loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+ struct writeback_control wbc = {
+ .bdi = page->mapping->backing_dev_info,
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .range_start = range_start,
+ .range_end = range_end,
+ };
+ int ret = 0;
+
+ BUG_ON(!PageLocked(page));
+ for (;;) {
+ req = nfs_page_find_request(page);
+ if (req == NULL)
+ goto out;
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_release_request(req);
+ break;
+ }
+ if (nfs_lock_request_dontget(req)) {
+ nfs_inode_remove_request(req);
+ /*
+ * In case nfs_inode_remove_request has marked the
+ * page as being dirty
+ */
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ nfs_unlock_request(req);
+ break;
+ }
+ ret = nfs_wait_on_request(req);
+ if (ret < 0)
+ goto out;
+ }
+ if (!PagePrivate(page))
+ return 0;
+ ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
+out:
+ return ret;
+}
+
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
loff_t range_start = page_offset(page);
@@ -1467,7 +1511,7 @@ int __init nfs_init_writepagecache(void)
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
sizeof(struct nfs_write_data),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_wdata_cachep == NULL)
return -ENOMEM;
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index c043136a..51f1b31 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -23,19 +23,15 @@
static struct file *do_open(char *name, int flags)
{
struct nameidata nd;
+ struct vfsmount *mnt;
int error;
- nd.mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
+ mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
+ if (IS_ERR(mnt))
+ return (struct file *)mnt;
- if (IS_ERR(nd.mnt))
- return (struct file *)nd.mnt;
-
- nd.dentry = dget(nd.mnt->mnt_root);
- nd.last_type = LAST_ROOT;
- nd.flags = 0;
- nd.depth = 0;
-
- error = path_walk(name, &nd);
+ error = vfs_path_lookup(mnt->mnt_root, mnt, name, 0, &nd);
+ mntput(mnt); /* drop do_kern_mount reference */
if (error)
return ERR_PTR(error);
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index cf61dc8..2192805 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -9,10 +9,11 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/export.h>
#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-static int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c7bbf46..cba899a 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -564,9 +564,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
/* flags */
err = get_int(&mesg, &an_int);
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ err = 0;
set_bit(CACHE_NEGATIVE, &exp.h.flags);
- else {
+ } else {
if (err || an_int < 0) goto out;
exp.ex_flags= an_int;
@@ -1265,7 +1266,7 @@ struct svc_export *
rqst_exp_get_by_name(struct svc_rqst *rqstp, struct vfsmount *mnt,
struct dentry *dentry)
{
- struct svc_export *gssexp, *exp = NULL;
+ struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
if (rqstp->rq_client == NULL)
goto gss;
@@ -1288,7 +1289,7 @@ gss:
&rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
- if (exp && !IS_ERR(exp))
+ if (!IS_ERR(exp))
exp_put(exp);
return gssexp;
}
@@ -1296,7 +1297,7 @@ gss:
struct svc_export *
rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
{
- struct svc_export *gssexp, *exp = NULL;
+ struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
if (rqstp->rq_client == NULL)
goto gss;
@@ -1318,7 +1319,7 @@ gss:
&rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
- if (exp && !IS_ERR(exp))
+ if (!IS_ERR(exp))
exp_put(exp);
return gssexp;
}
@@ -1503,9 +1504,9 @@ static void exp_flags(struct seq_file *m, int flag, int fsid,
if (flag & NFSEXP_FSID)
seq_printf(m, ",fsid=%d", fsid);
if (anonu != (uid_t)-2 && anonu != (0x10000-2))
- seq_printf(m, ",sanonuid=%d", anonu);
+ seq_printf(m, ",anonuid=%u", anonu);
if (anong != (gid_t)-2 && anong != (0x10000-2))
- seq_printf(m, ",sanongid=%d", anong);
+ seq_printf(m, ",anongid=%u", anong);
if (fsloc && fsloc->locations_count > 0) {
char *loctype = (fsloc->migrated) ? "refer" : "replicas";
int i;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 2cf9a9a..2ccffde 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -138,7 +138,7 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
char idstr[11];
qword_add(bpp, blen, ent->authname);
- snprintf(idstr, sizeof(idstr), "%d", ent->id);
+ snprintf(idstr, sizeof(idstr), "%u", ent->id);
qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
qword_add(bpp, blen, idstr);
@@ -165,7 +165,7 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
return 0;
}
ent = container_of(h, struct ent, h);
- seq_printf(m, "%s %s %d", ent->authname,
+ seq_printf(m, "%s %s %u", ent->authname,
ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
ent->id);
if (test_bit(CACHE_VALID, &h->flags))
@@ -349,7 +349,7 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
ent->name);
if (test_bit(CACHE_VALID, &h->flags))
- seq_printf(m, " %d", ent->id);
+ seq_printf(m, " %u", ent->id);
seq_printf(m, "\n");
return 0;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c62712..29b7e63 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -100,7 +100,15 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
status = nfsd_create_v3(rqstp, current_fh, open->op_fname.data,
open->op_fname.len, &open->op_iattr,
&resfh, open->op_createmode,
- (u32 *)open->op_verf.data, &open->op_truncate, &created);
+ (u32 *)open->op_verf.data,
+ &open->op_truncate, &created);
+
+ /* If we ever decide to use different attrs to store the
+ * verifier in nfsd_create_v3, then we'll need to change this
+ */
+ if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
+ open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
+ FATTR4_WORD1_TIME_MODIFY);
} else {
status = nfsd_lookup(rqstp, current_fh,
open->op_fname.data, open->op_fname.len, &resfh);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index e4a4c87..3f55970 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -256,7 +256,7 @@ nfs4_close_delegation(struct nfs4_delegation *dp)
/* The following nfsd_close may not actually close the file,
* but we want to remove the lease in any case. */
if (dp->dl_flock)
- setlease(filp, F_UNLCK, &dp->dl_flock);
+ vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
nfsd_close(filp);
}
@@ -1032,19 +1032,19 @@ static int
nfsd4_init_slabs(void)
{
stateowner_slab = kmem_cache_create("nfsd4_stateowners",
- sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_stateowner), 0, 0, NULL);
if (stateowner_slab == NULL)
goto out_nomem;
file_slab = kmem_cache_create("nfsd4_files",
- sizeof(struct nfs4_file), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_file), 0, 0, NULL);
if (file_slab == NULL)
goto out_nomem;
stateid_slab = kmem_cache_create("nfsd4_stateids",
- sizeof(struct nfs4_stateid), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_stateid), 0, 0, NULL);
if (stateid_slab == NULL)
goto out_nomem;
deleg_slab = kmem_cache_create("nfsd4_delegations",
- sizeof(struct nfs4_delegation), 0, 0, NULL, NULL);
+ sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_nomem;
return 0;
@@ -1402,7 +1402,7 @@ void nfsd_release_deleg_cb(struct file_lock *fl)
/*
* Set the delegation file_lock back pointer.
*
- * Called from __setlease() with lock_kernel() held.
+ * Called from setlease() with lock_kernel() held.
*/
static
void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl)
@@ -1416,7 +1416,7 @@ void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl)
}
/*
- * Called from __setlease() with lock_kernel() held
+ * Called from setlease() with lock_kernel() held
*/
static
int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
@@ -1716,10 +1716,10 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
fl.fl_file = stp->st_vfs_file;
fl.fl_pid = current->tgid;
- /* setlease checks to see if delegation should be handed out.
+ /* vfs_setlease checks to see if delegation should be handed out.
* the lock_manager callbacks fl_mylease and fl_change are used
*/
- if ((status = setlease(stp->st_vfs_file,
+ if ((status = vfs_setlease(stp->st_vfs_file,
flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK, &flp))) {
dprintk("NFSD: setlease failed [%d], no delegation\n", status);
unhash_delegation(dp);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b3d55c6..8ef0964 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2450,7 +2450,7 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
}
static void
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo)
{
int i = 0;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 0eb464a..7011d62a 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -566,13 +566,23 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
case FSID_DEV:
case FSID_ENCODE_DEV:
case FSID_MAJOR_MINOR:
- return FSIDSOURCE_DEV;
+ if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags
+ & FS_REQUIRES_DEV)
+ return FSIDSOURCE_DEV;
+ break;
case FSID_NUM:
- return FSIDSOURCE_FSID;
- default:
if (fhp->fh_export->ex_flags & NFSEXP_FSID)
return FSIDSOURCE_FSID;
- else
- return FSIDSOURCE_UUID;
+ break;
+ default:
+ break;
}
+ /* either a UUID type filehandle, or the filehandle doesn't
+ * match the export.
+ */
+ if (fhp->fh_export->ex_flags & NFSEXP_FSID)
+ return FSIDSOURCE_FSID;
+ if (fhp->fh_export->ex_uuid)
+ return FSIDSOURCE_UUID;
+ return FSIDSOURCE_DEV;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index e90f4a8..7867151 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -115,19 +115,20 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts);
if (IS_ERR(exp2)) {
- err = PTR_ERR(exp2);
+ if (PTR_ERR(exp2) != -ENOENT)
+ err = PTR_ERR(exp2);
dput(mounts);
mntput(mnt);
goto out;
}
- if (exp2 && ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2))) {
+ if ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
/* successfully crossed mount point */
exp_put(exp);
*expp = exp2;
dput(dentry);
*dpp = mounts;
} else {
- if (exp2) exp_put(exp2);
+ exp_put(exp2);
dput(mounts);
}
mntput(mnt);
@@ -1309,7 +1310,10 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (createmode == NFS3_CREATE_EXCLUSIVE) {
/* solaris7 gets confused (bugid 4218508) if these have
- * the high bit set, so just clear the high bits.
+ * the high bit set, so just clear the high bits. If this is
+ * ever changed to use different attrs for storing the
+ * verifier, then do_open_lookup() will also need to be fixed
+ * accordingly.
*/
v_mtime = verifier[0]&0x7fffffff;
v_atime = verifier[1]&0x7fffffff;
@@ -1797,6 +1801,11 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
return err;
}
+static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
+{
+ return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
+}
+
/*
* Check for a user's access permissions to this inode.
*/
@@ -1833,7 +1842,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
if (!(acc & MAY_LOCAL_ACCESS))
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
- if (EX_RDONLY(exp, rqstp) || IS_RDONLY(inode))
+ if (exp_rdonly(rqstp, exp) || IS_RDONLY(inode))
return nfserr_rofs;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
@@ -1916,7 +1925,7 @@ nfsd_racache_init(int cache_size)
raparm_hash[i].pb_head = NULL;
spin_lock_init(&raparm_hash[i].pb_lock);
}
- nperbucket = cache_size >> RAPARM_HASH_BITS;
+ nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
for (i = 0; i < cache_size - 1; i++) {
if (i % nperbucket == 0)
raparm_hash[j++].pb_head = raparml + i;
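The nperbucket line above trades a truncating shift for DIV_ROUND_UP(): assuming RAPARM_HASH_SIZE is 1 << RAPARM_HASH_BITS (a 16-bucket table in the sketch below), a cache smaller than the table used to compute nperbucket as 0, making the following i % nperbucket a division by zero, and any non-multiple size under-filled the last buckets. A quick arithmetic check with assumed constants:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RAPARM_HASH_BITS	4		/* assumed value */
#define RAPARM_HASH_SIZE	(1 << RAPARM_HASH_BITS)

int main(void)
{
	int cache_size = 10;	/* fewer entries than hash buckets */

	/* old calculation truncates to 0, making "i % nperbucket" invalid */
	printf("old nperbucket = %d\n", cache_size >> RAPARM_HASH_BITS);
	/* new calculation rounds up to at least 1 */
	printf("new nperbucket = %d\n",
	       DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE));
	return 0;
}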
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index af4ef80..345798e 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -17,6 +17,18 @@ ToDo/Notes:
happen is unclear however so it is worth waiting until someone hits
the problem.
+2.1.29 - Fix a deadlock at mount time.
+
+ - During mount the VFS holds s_umount lock on the superblock. So when
+ we try to empty the journal $LogFile contents by calling
+ ntfs_attr_set() when the machine does not have much memory and the
+ journal is large, ntfs_attr_set() results in the VM trying to balance
+ dirty pages, which in turn tries to take the s_umount lock and thus we
+ get a deadlock. The solution is to not use ntfs_attr_set() and
+ instead do the zeroing by hand at the block level rather than page
+ cache level.
+ - Fix sparse warnings.
+
2.1.28 - Fix a deadlock.
- Fix deadlock in fs/ntfs/inode.c::ntfs_put_inode(). Thanks to Sergey
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 8255083..58b6be9 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.28\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 6e5c253..cfdc790 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -396,7 +396,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
- u8 *kaddr;
+ u8 *addr;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
unsigned long flags;
@@ -491,15 +491,15 @@ retry_readpage:
/* Race with shrinking truncate. */
attr_len = i_size;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page, KM_USER0);
/* Copy the data to the page. */
- memcpy(kaddr, (u8*)ctx->attr +
+ memcpy(addr, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
attr_len);
/* Zero the remainder of the page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr, KM_USER0);
put_unm_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
@@ -1344,7 +1344,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
loff_t i_size;
struct inode *vi = page->mapping->host;
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
- char *kaddr;
+ char *addr;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *m = NULL;
u32 attr_len;
@@ -1484,14 +1484,14 @@ retry_writepage:
/* Shrinking cannot fail. */
BUG_ON(err);
}
- kaddr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page, KM_USER0);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
- kaddr, attr_len);
+ addr, attr_len);
/* Zero out of bounds area in the page cache page. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
- kunmap_atomic(kaddr, KM_USER0);
+ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ kunmap_atomic(addr, KM_USER0);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
/* We are done with the page. */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 1c08fef..92dabdc 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1,7 +1,7 @@
/**
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -2500,7 +2500,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
struct page *page;
u8 *kaddr;
pgoff_t idx, end;
- unsigned int start_ofs, end_ofs, size;
+ unsigned start_ofs, end_ofs, size;
ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
(long long)ofs, (long long)cnt, val);
@@ -2548,6 +2548,8 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
page_cache_release(page);
+ balance_dirty_pages_ratelimited(mapping);
+ cond_resched();
if (idx == end)
goto done;
idx++;
@@ -2604,6 +2606,8 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
page_cache_release(page);
+ balance_dirty_pages_ratelimited(mapping);
+ cond_resched();
}
done:
ntfs_debug("Done.");
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ffcc504..c814204 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -26,7 +26,6 @@
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
-#include <linux/sched.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -362,7 +361,7 @@ static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
volatile char c;
/* Set @end to the first byte outside the last page we care about. */
- end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);
+ end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
;
@@ -532,7 +531,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
blocksize_bits = vol->sb->s_blocksize_bits;
u = 0;
do {
- struct page *page = pages[u];
+ page = pages[u];
+ BUG_ON(!page);
/*
* create_empty_buffers() will create uptodate/dirty buffers if
* the page is uptodate/dirty.
@@ -1291,7 +1291,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t total = 0;
unsigned len;
int left;
@@ -1300,13 +1300,13 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
- kunmap_atomic(kaddr, KM_USER0);
+ addr = kmap_atomic(*pages, KM_USER0);
+ left = __copy_from_user_inatomic(addr + ofs, buf, len);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(left)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- left = __copy_from_user(kaddr + ofs, buf, len);
+ addr = kmap(*pages);
+ left = __copy_from_user(addr + ofs, buf, len);
kunmap(*pages);
if (unlikely(left))
goto err_out;
@@ -1408,26 +1408,26 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
size_t *iov_ofs, size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t copied, len, total = 0;
do {
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+ addr = kmap_atomic(*pages, KM_USER0);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(copied != len)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+ addr = kmap(*pages);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
/*
* Zero the rest of the target like __copy_from_user().
*/
- memset(kaddr + ofs + copied, 0, len - copied);
+ memset(addr + ofs + copied, 0, len - copied);
kunmap(*pages);
if (unlikely(copied != len))
goto err_out;
@@ -1735,8 +1735,6 @@ static int ntfs_commit_pages_after_write(struct page **pages,
read_unlock_irqrestore(&ni->size_lock, flags);
BUG_ON(initialized_size != i_size);
if (end > initialized_size) {
- unsigned long flags;
-
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = end;
i_size_write(vi, end);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index b532a73..e9da092 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -34,7 +34,6 @@
#include "dir.h"
#include "debug.h"
#include "inode.h"
-#include "attrib.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
@@ -2500,8 +2499,6 @@ retry_truncate:
/* Resize the attribute record to best fit the new attribute size. */
if (new_size < vol->mft_record_size &&
!ntfs_resident_attr_value_resize(m, a, new_size)) {
- unsigned long flags;
-
/* The resize succeeded! */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index acfed32..d7932e9 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -1,7 +1,7 @@
/*
* logfile.c - NTFS kernel journal handling. Part of the Linux-NTFS project.
*
- * Copyright (c) 2002-2005 Anton Altaparmakov
+ * Copyright (c) 2002-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
@@ -724,24 +724,139 @@ bool ntfs_is_logfile_clean(struct inode *log_vi, const RESTART_PAGE_HEADER *rp)
*/
bool ntfs_empty_logfile(struct inode *log_vi)
{
- ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
+ VCN vcn, end_vcn;
+ ntfs_inode *log_ni = NTFS_I(log_vi);
+ ntfs_volume *vol = log_ni->vol;
+ struct super_block *sb = vol->sb;
+ runlist_element *rl;
+ unsigned long flags;
+ unsigned block_size, block_size_bits;
+ int err;
+ bool should_wait = true;
ntfs_debug("Entering.");
- if (!NVolLogFileEmpty(vol)) {
- int err;
-
- err = ntfs_attr_set(NTFS_I(log_vi), 0, i_size_read(log_vi),
- 0xff);
- if (unlikely(err)) {
- ntfs_error(vol->sb, "Failed to fill $LogFile with "
- "0xff bytes (error code %i).", err);
- return false;
- }
- /* Set the flag so we do not have to do it again on remount. */
- NVolSetLogFileEmpty(vol);
+ if (NVolLogFileEmpty(vol)) {
+ ntfs_debug("Done.");
+ return true;
}
+ /*
+ * We cannot use ntfs_attr_set() because we may be still in the middle
+ * of a mount operation. Thus we do the emptying by hand by first
+ * zapping the page cache pages for the $LogFile/$DATA attribute and
+ * then emptying each of the buffers in each of the clusters specified
+ * by the runlist by hand.
+ */
+ block_size = sb->s_blocksize;
+ block_size_bits = sb->s_blocksize_bits;
+ vcn = 0;
+ read_lock_irqsave(&log_ni->size_lock, flags);
+ end_vcn = (log_ni->initialized_size + vol->cluster_size_mask) >>
+ vol->cluster_size_bits;
+ read_unlock_irqrestore(&log_ni->size_lock, flags);
+ truncate_inode_pages(log_vi->i_mapping, 0);
+ down_write(&log_ni->runlist.lock);
+ rl = log_ni->runlist.rl;
+ if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {
+map_vcn:
+ err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);
+ if (err) {
+ ntfs_error(sb, "Failed to map runlist fragment (error "
+ "%d).", -err);
+ goto err;
+ }
+ rl = log_ni->runlist.rl;
+ BUG_ON(!rl || vcn < rl->vcn || !rl->length);
+ }
+ /* Seek to the runlist element containing @vcn. */
+ while (rl->length && vcn >= rl[1].vcn)
+ rl++;
+ do {
+ LCN lcn;
+ sector_t block, end_block;
+ s64 len;
+
+ /*
+ * If this run is not mapped map it now and start again as the
+ * runlist will have been updated.
+ */
+ lcn = rl->lcn;
+ if (unlikely(lcn == LCN_RL_NOT_MAPPED)) {
+ vcn = rl->vcn;
+ goto map_vcn;
+ }
+ /* If this run is not valid abort with an error. */
+ if (unlikely(!rl->length || lcn < LCN_HOLE))
+ goto rl_err;
+ /* Skip holes. */
+ if (lcn == LCN_HOLE)
+ continue;
+ block = lcn << vol->cluster_size_bits >> block_size_bits;
+ len = rl->length;
+ if (rl[1].vcn > end_vcn)
+ len = end_vcn - rl->vcn;
+ end_block = (lcn + len) << vol->cluster_size_bits >>
+ block_size_bits;
+ /* Iterate over the blocks in the run and empty them. */
+ do {
+ struct buffer_head *bh;
+
+ /* Obtain the buffer, possibly not uptodate. */
+ bh = sb_getblk(sb, block);
+ BUG_ON(!bh);
+ /* Setup buffer i/o submission. */
+ lock_buffer(bh);
+ bh->b_end_io = end_buffer_write_sync;
+ get_bh(bh);
+ /* Set the entire contents of the buffer to 0xff. */
+ memset(bh->b_data, -1, block_size);
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ if (buffer_dirty(bh))
+ clear_buffer_dirty(bh);
+ /*
+ * Submit the buffer and wait for i/o to complete but
+ * only for the first buffer so we do not miss really
+ * serious i/o errors. Once the first buffer has
+ * completed ignore errors afterwards as we can assume
+ * that if one buffer worked all of them will work.
+ */
+ submit_bh(WRITE, bh);
+ if (should_wait) {
+ should_wait = false;
+ wait_on_buffer(bh);
+ if (unlikely(!buffer_uptodate(bh)))
+ goto io_err;
+ }
+ brelse(bh);
+ } while (++block < end_block);
+ } while ((++rl)->vcn < end_vcn);
+ up_write(&log_ni->runlist.lock);
+ /*
+ * Zap the pages again just in case any got instantiated whilst we were
+ * emptying the blocks by hand. FIXME: We may not have completed
+ * writing to all the buffer heads yet so this may happen too early.
+ * We really should use a kernel thread to do the emptying
+ * asynchronously and then we can also set the volume dirty and output
+ * an error message if emptying should fail.
+ */
+ truncate_inode_pages(log_vi->i_mapping, 0);
+ /* Set the flag so we do not have to do it again on remount. */
+ NVolSetLogFileEmpty(vol);
ntfs_debug("Done.");
return true;
+io_err:
+ ntfs_error(sb, "Failed to write buffer. Unmount and run chkdsk.");
+ goto dirty_err;
+rl_err:
+ ntfs_error(sb, "Runlist is corrupt. Unmount and run chkdsk.");
+dirty_err:
+ NVolSetErrors(vol);
+ err = -EIO;
+err:
+ up_write(&log_ni->runlist.lock);
+ ntfs_error(sb, "Failed to fill $LogFile with 0xff bytes (error %d).",
+ -err);
+ return false;
}
#endif /* NTFS_RW */
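The new ntfs_empty_logfile() walks each run and converts cluster numbers to block numbers with the double shift block = lcn << cluster_size_bits >> block_size_bits. A small standalone check of that arithmetic, assuming an illustrative 4 KiB cluster and 512-byte block size rather than any particular volume:

#include <stdio.h>

int main(void)
{
    /* Illustrative geometry: 4 KiB clusters, 512-byte blocks. */
    const unsigned cluster_size_bits = 12;
    const unsigned block_size_bits = 9;

    long long lcn = 100;   /* first cluster of the run */
    long long len = 3;     /* run length in clusters   */

    /* Same shifts as the emptying loop: cluster number -> block number. */
    long long block = lcn << cluster_size_bits >> block_size_bits;
    long long end_block = (lcn + len) << cluster_size_bits >> block_size_bits;

    printf("run covers blocks %lld..%lld (%lld blocks)\n",
           block, end_block - 1, end_block - block);
    return 0;
}

With these numbers the run maps to blocks 800..823, i.e. three clusters of eight blocks each.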
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 9afd72c..56a9a6d2 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -1,7 +1,7 @@
/**
* runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002-2005 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
@@ -1714,7 +1714,7 @@ extend_hole:
sizeof(*rl));
/* Adjust the beginning of the tail if necessary. */
if (end > rl->vcn) {
- s64 delta = end - rl->vcn;
+ delta = end - rl->vcn;
rl->vcn = end;
rl->length -= delta;
/* Only adjust the lcn if it is real. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 4566b91..90c4e3a 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -3143,7 +3143,7 @@ static int __init init_ntfs_fs(void)
ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
sizeof(ntfs_index_context), 0 /* offset */,
- SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+ SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_index_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_index_ctx_cache_name);
@@ -3151,7 +3151,7 @@ static int __init init_ntfs_fs(void)
}
ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
sizeof(ntfs_attr_search_ctx), 0 /* offset */,
- SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
+ SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_attr_ctx_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_attr_ctx_cache_name);
@@ -3160,7 +3160,7 @@ static int __init init_ntfs_fs(void)
ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!ntfs_name_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_name_cache_name);
@@ -3169,7 +3169,7 @@ static int __init init_ntfs_fs(void)
ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
sizeof(ntfs_inode), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!ntfs_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_inode_cache_name);
@@ -3179,7 +3179,7 @@ static int __init init_ntfs_fs(void)
ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
sizeof(big_ntfs_inode), 0,
SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- ntfs_big_inode_init_once, NULL);
+ ntfs_big_inode_init_once);
if (!ntfs_big_inode_cache) {
printk(KERN_CRIT "NTFS: Failed to create %s!\n",
ntfs_big_inode_cache_name);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f5e11f4..4ba7f0b 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -354,7 +354,6 @@ struct ocfs2_insert_type {
enum ocfs2_append_type ins_appending;
enum ocfs2_contig_type ins_contig;
int ins_contig_index;
- int ins_free_records;
int ins_tree_depth;
};
@@ -362,7 +361,6 @@ struct ocfs2_merge_ctxt {
enum ocfs2_contig_type c_contig_type;
int c_has_empty_extent;
int c_split_covers_rec;
- int c_used_tail_recs;
};
/*
@@ -2808,36 +2806,28 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
struct ocfs2_merge_ctxt *ctxt)
{
- int ret = 0, delete_tail_recs = 0;
+ int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(left_path);
struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
- if (ctxt->c_split_covers_rec) {
- delete_tail_recs++;
-
- if (ctxt->c_contig_type == CONTIG_LEFTRIGHT ||
- ctxt->c_has_empty_extent)
- delete_tail_recs++;
-
- if (ctxt->c_has_empty_extent) {
- /*
- * The merge code will need to create an empty
- * extent to take the place of the newly
- * emptied slot. Remove any pre-existing empty
- * extents - having more than one in a leaf is
- * illegal.
- */
- ret = ocfs2_rotate_tree_left(inode, handle, left_path,
- dealloc);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- split_index--;
- rec = &el->l_recs[split_index];
+ if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
+ /*
+ * The merge code will need to create an empty
+ * extent to take the place of the newly
+ * emptied slot. Remove any pre-existing empty
+ * extents - having more than one in a leaf is
+ * illegal.
+ */
+ ret = ocfs2_rotate_tree_left(inode, handle, left_path,
+ dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
+ split_index--;
+ rec = &el->l_recs[split_index];
}
if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
@@ -3593,6 +3583,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
struct buffer_head *di_bh,
struct buffer_head **last_eb_bh,
struct ocfs2_extent_rec *insert_rec,
+ int *free_records,
struct ocfs2_insert_type *insert)
{
int ret;
@@ -3633,7 +3624,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
* XXX: This test is simplistic, we can search for empty
* extent records too.
*/
- insert->ins_free_records = le16_to_cpu(el->l_count) -
+ *free_records = le16_to_cpu(el->l_count) -
le16_to_cpu(el->l_next_free_rec);
if (!insert->ins_tree_depth) {
@@ -3730,11 +3721,13 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
struct ocfs2_alloc_context *meta_ac)
{
int status;
+ int uninitialized_var(free_records);
struct buffer_head *last_eb_bh = NULL;
- struct buffer_head *bh = NULL;
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
+ BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
+
mlog(0, "add %u clusters at position %u to inode %llu\n",
new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -3753,7 +3746,7 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
rec.e_flags = flags;
status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec,
- &insert);
+ &free_records, &insert);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -3763,9 +3756,9 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
"Insert.contig_index: %d, Insert.free_records: %d, "
"Insert.tree_depth: %d\n",
insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
- insert.ins_free_records, insert.ins_tree_depth);
+ free_records, insert.ins_tree_depth);
- if (insert.ins_contig == CONTIG_NONE && insert.ins_free_records == 0) {
+ if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
status = ocfs2_grow_tree(inode, handle, fe_bh,
&insert.ins_tree_depth, &last_eb_bh,
meta_ac);
@@ -3783,9 +3776,6 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
ocfs2_extent_map_insert_rec(inode, &rec);
bail:
- if (bh)
- brelse(bh);
-
if (last_eb_bh)
brelse(last_eb_bh);
@@ -3851,26 +3841,17 @@ leftright:
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- int old_depth = depth;
-
ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
-
- if (old_depth != depth) {
- eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
- rightmost_el = &eb->h_list;
- }
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
- insert.ins_free_records = le16_to_cpu(rightmost_el->l_count)
- - le16_to_cpu(rightmost_el->l_next_free_rec);
insert.ins_tree_depth = depth;
insert_range = le32_to_cpu(split_rec.e_cpos) +
@@ -4019,11 +4000,6 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
} else
rightmost_el = path_root_el(path);
- ctxt.c_used_tail_recs = le16_to_cpu(rightmost_el->l_next_free_rec);
- if (ctxt.c_used_tail_recs > 0 &&
- ocfs2_is_empty_extent(&rightmost_el->l_recs[0]))
- ctxt.c_used_tail_recs--;
-
if (rec->e_cpos == split_rec->e_cpos &&
rec->e_leaf_clusters == split_rec->e_leaf_clusters)
ctxt.c_split_covers_rec = 1;
@@ -4032,10 +4008,9 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
- mlog(0, "index: %d, contig: %u, used_tail_recs: %u, "
- "has_empty: %u, split_covers: %u\n", split_index,
- ctxt.c_contig_type, ctxt.c_used_tail_recs,
- ctxt.c_has_empty_extent, ctxt.c_split_covers_rec);
+ mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
+ split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
+ ctxt.c_split_covers_rec);
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
@@ -4184,27 +4159,18 @@ static int ocfs2_split_tree(struct inode *inode, struct buffer_head *di_bh,
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- int old_depth = depth;
-
ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, &last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
-
- if (old_depth != depth) {
- eb = (struct ocfs2_extent_block *)last_eb_bh->b_data;
- rightmost_el = &eb->h_list;
- }
}
memset(&insert, 0, sizeof(struct ocfs2_insert_type));
insert.ins_appending = APPEND_NONE;
insert.ins_contig = CONTIG_NONE;
insert.ins_split = SPLIT_RIGHT;
- insert.ins_free_records = le16_to_cpu(rightmost_el->l_count)
- - le16_to_cpu(rightmost_el->l_next_free_rec);
insert.ins_tree_depth = depth;
ret = ocfs2_do_insert_extent(inode, handle, di_bh, &split_rec, &insert);
@@ -5606,6 +5572,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
clusters_to_del;
spin_unlock(&OCFS2_I(inode)->ip_lock);
le32_add_cpu(&fe->i_clusters, -clusters_to_del);
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
status = ocfs2_trim_tree(inode, path, handle, tc,
clusters_to_del, &delete_blk);
@@ -5668,12 +5635,50 @@ static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
return ocfs2_journal_dirty_data(handle, bh);
}
+static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
+ unsigned int from, unsigned int to,
+ struct page *page, int zero, u64 *phys)
+{
+ int ret, partial = 0;
+
+ ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
+ if (ret)
+ mlog_errno(ret);
+
+ if (zero)
+ zero_user_page(page, from, to - from, KM_USER0);
+
+ /*
+ * Need to set the buffers we zero'd into uptodate
+ * here if they aren't - ocfs2_map_page_blocks()
+ * might've skipped some
+ */
+ if (ocfs2_should_order_data(inode)) {
+ ret = walk_page_buffers(handle,
+ page_buffers(page),
+ from, to, &partial,
+ ocfs2_ordered_zero_func);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else {
+ ret = walk_page_buffers(handle, page_buffers(page),
+ from, to, &partial,
+ ocfs2_writeback_zero_func);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+ if (!partial)
+ SetPageUptodate(page);
+
+ flush_dcache_page(page);
+}
+
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
loff_t end, struct page **pages,
int numpages, u64 phys, handle_t *handle)
{
- int i, ret, partial = 0;
- void *kaddr;
+ int i;
struct page *page;
unsigned int from, to = PAGE_CACHE_SIZE;
struct super_block *sb = inode->i_sb;
@@ -5694,87 +5699,31 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
BUG_ON(from > PAGE_CACHE_SIZE);
BUG_ON(to > PAGE_CACHE_SIZE);
- ret = ocfs2_map_page_blocks(page, &phys, inode, from, to, 0);
- if (ret)
- mlog_errno(ret);
-
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + from, 0, to - from);
- kunmap_atomic(kaddr, KM_USER0);
-
- /*
- * Need to set the buffers we zero'd into uptodate
- * here if they aren't - ocfs2_map_page_blocks()
- * might've skipped some
- */
- if (ocfs2_should_order_data(inode)) {
- ret = walk_page_buffers(handle,
- page_buffers(page),
- from, to, &partial,
- ocfs2_ordered_zero_func);
- if (ret < 0)
- mlog_errno(ret);
- } else {
- ret = walk_page_buffers(handle, page_buffers(page),
- from, to, &partial,
- ocfs2_writeback_zero_func);
- if (ret < 0)
- mlog_errno(ret);
- }
-
- if (!partial)
- SetPageUptodate(page);
-
- flush_dcache_page(page);
+ ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
+ &phys);
start = (page->index + 1) << PAGE_CACHE_SHIFT;
}
out:
- if (pages) {
- for (i = 0; i < numpages; i++) {
- page = pages[i];
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
- }
- }
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, numpages);
}
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num, u64 *phys)
+ struct page **pages, int *num)
{
- int i, numpages = 0, ret = 0;
- unsigned int ext_flags;
+ int numpages, ret = 0;
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
- BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
BUG_ON(start > end);
- if (start == end)
- goto out;
-
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
- ret = ocfs2_extent_map_get_blocks(inode, start >> sb->s_blocksize_bits,
- phys, NULL, &ext_flags);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- /* Tail is a hole. */
- if (*phys == 0)
- goto out;
-
- /* Tail is marked as unwritten, we can count on write to zero
- * in that case. */
- if (ext_flags & OCFS2_EXT_UNWRITTEN)
- goto out;
-
+ numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
do {
@@ -5791,14 +5740,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
out:
if (ret != 0) {
- if (pages) {
- for (i = 0; i < numpages; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- page_cache_release(pages[i]);
- }
- }
- }
+ if (pages)
+ ocfs2_unlock_and_free_pages(pages, numpages);
numpages = 0;
}
@@ -5819,18 +5762,20 @@ out:
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
- int ret, numpages;
+ int ret = 0, numpages;
struct page **pages = NULL;
u64 phys;
+ unsigned int ext_flags;
+ struct super_block *sb = inode->i_sb;
/*
* File systems which don't support sparse files zero on every
* extend.
*/
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
return 0;
- pages = kcalloc(ocfs2_pages_per_cluster(inode->i_sb),
+ pages = kcalloc(ocfs2_pages_per_cluster(sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
ret = -ENOMEM;
@@ -5838,16 +5783,31 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
- &numpages, &phys);
+ if (range_start == range_end)
+ goto out;
+
+ ret = ocfs2_extent_map_get_blocks(inode,
+ range_start >> sb->s_blocksize_bits,
+ &phys, NULL, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (numpages == 0)
+ /*
+ * Tail is a hole, or is marked unwritten. In either case, we
+ * can count on read and write to return/push zeros.
+ */
+ if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
+ ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
+ &numpages);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
numpages, phys, handle);
@@ -5868,6 +5828,178 @@ out:
return ret;
}
+static void ocfs2_zero_dinode_id2(struct inode *inode, struct ocfs2_dinode *di)
+{
+ unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
+
+ memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2));
+}
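ocfs2_zero_dinode_id2() relies on the id2 union being the last member of the on-disk inode, so everything from offsetof(..., id2) to the end of the block can be cleared with a single memset(). A standalone sketch of the same pattern using a made-up structure (illustrative only, not the ocfs2 layout):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 512

/* Toy on-disk record: fixed header followed by a trailing union. */
struct toy_dinode {
    unsigned long long i_size;
    unsigned int       i_flags;
    union {
        char data[1];   /* actually runs to the end of the block */
    } id2;
};

int main(void)
{
    union {
        unsigned char     raw[BLOCKSIZE];
        struct toy_dinode di;
    } b;

    memset(b.raw, 0xaa, sizeof(b.raw));
    b.di.i_size = 42;

    /* Clear everything after the fixed fields, exactly like the helper
     * above: tail length = blocksize - offsetof(id2). */
    memset(&b.di.id2, 0, BLOCKSIZE - offsetof(struct toy_dinode, id2));

    assert(b.di.i_size == 42);
    assert(b.raw[BLOCKSIZE - 1] == 0);
    printf("header preserved, %zu tail bytes cleared\n",
           BLOCKSIZE - offsetof(struct toy_dinode, id2));
    return 0;
}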
+
+void ocfs2_dinode_new_extent_list(struct inode *inode,
+ struct ocfs2_dinode *di)
+{
+ ocfs2_zero_dinode_id2(inode, di);
+ di->id2.i_list.l_tree_depth = 0;
+ di->id2.i_list.l_next_free_rec = 0;
+ di->id2.i_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(inode->i_sb));
+}
+
+void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ /*
+ * We clear the entire i_data structure here so that all
+ * fields can be properly initialized.
+ */
+ ocfs2_zero_dinode_id2(inode, di);
+
+ idata->id_count = cpu_to_le16(ocfs2_max_inline_data(inode->i_sb));
+}
+
+int ocfs2_convert_inline_data_to_extents(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret, i, has_data, num_pages = 0;
+ handle_t *handle;
+ u64 uninitialized_var(block);
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_alloc_context *data_ac = NULL;
+ struct page **pages = NULL;
+ loff_t end = osb->s_clustersize;
+
+ has_data = i_size_read(inode) ? 1 : 0;
+
+ if (has_data) {
+ pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
+ sizeof(struct page *), GFP_NOFS);
+ if (pages == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ if (has_data) {
+ u32 bit_off, num;
+ unsigned int page_end;
+ u64 phys;
+
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ &num);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Save two copies, one for insert, and one that can
+ * be changed by ocfs2_map_and_dirty_page() below.
+ */
+ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
+
+ /*
+ * Non sparse file systems zero on extend, so no need
+ * to do that now.
+ */
+ if (!ocfs2_sparse_alloc(osb) &&
+ PAGE_CACHE_SIZE < osb->s_clustersize)
+ end = PAGE_CACHE_SIZE;
+
+ ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * This should populate the 1st page for us and mark
+ * it up to date.
+ */
+ ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ page_end = PAGE_CACHE_SIZE;
+ if (PAGE_CACHE_SIZE > osb->s_clustersize)
+ page_end = osb->s_clustersize;
+
+ for (i = 0; i < num_pages; i++)
+ ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
+ pages[i], i > 0, &phys);
+ }
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ocfs2_dinode_new_extent_list(inode, di);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+ if (has_data) {
+ /*
+ * An error at this point should be extremely rare. If
+ * this proves to be false, we could always re-build
+ * the in-inode data from our pages.
+ */
+ ret = ocfs2_insert_extent(osb, handle, inode, di_bh,
+ 0, block, 1, 0, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out_unlock:
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+
+out:
+ if (pages) {
+ ocfs2_unlock_and_free_pages(pages, num_pages);
+ kfree(pages);
+ }
+
+ return ret;
+}
+
/*
* It is expected, that by the time you call this function,
* inode->i_size and fe->i_size have been adjusted.
@@ -6093,6 +6225,81 @@ bail:
return status;
}
+/*
+ * 'start' is inclusive, 'end' is not.
+ */
+int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ unsigned int start, unsigned int end, int trunc)
+{
+ int ret;
+ unsigned int numbytes;
+ handle_t *handle;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+
+ if (end > i_size_read(inode))
+ end = i_size_read(inode);
+
+ BUG_ON(start >= end);
+
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
+ !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
+ !ocfs2_supports_inline_data(osb)) {
+ ocfs2_error(inode->i_sb,
+ "Inline data flags for inode %llu don't agree! "
+ "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ le16_to_cpu(di->i_dyn_features),
+ OCFS2_I(inode)->ip_dyn_features,
+ osb->s_feature_incompat);
+ ret = -EROFS;
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ numbytes = end - start;
+ memset(idata->id_data + start, 0, numbytes);
+
+ /*
+ * No need to worry about the data page here - it's been
+ * truncated already and inline data doesn't need it for
+ * pushing zeros to disk, so we'll let readpage pick it up
+ * later.
+ */
+ if (trunc) {
+ i_size_write(inode, start);
+ di->i_size = cpu_to_le64(start);
+ }
+
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ return ret;
+}
+
static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
{
/*
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 990df48..42ff94b 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -62,6 +62,11 @@ static inline int ocfs2_extend_meta_needed(struct ocfs2_dinode *fe)
return le16_to_cpu(fe->id2.i_list.l_tree_depth) + 2;
}
+void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di);
+void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di);
+int ocfs2_convert_inline_data_to_extents(struct inode *inode,
+ struct buffer_head *di_bh);
+
int ocfs2_truncate_log_init(struct ocfs2_super *osb);
void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb);
void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
@@ -115,6 +120,8 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh,
struct ocfs2_truncate_context *tc);
+int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ unsigned int start, unsigned int end, int trunc);
int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
u32 cpos, struct buffer_head **leaf_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 84bf6e7..34d1045 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -206,9 +206,70 @@ bail:
return err;
}
+int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+ struct buffer_head *di_bh)
+{
+ void *kaddr;
+ unsigned int size;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
+ ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EROFS;
+ }
+
+ size = i_size_read(inode);
+
+ if (size > PAGE_CACHE_SIZE ||
+ size > ocfs2_max_inline_data(inode->i_sb)) {
+ ocfs2_error(inode->i_sb,
+ "Inode %llu has with inline data has bad size: %u",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, size);
+ return -EROFS;
+ }
+
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (size)
+ memcpy(kaddr, di->id2.i_data.id_data, size);
+ /* Clear the remaining part of the page */
+ memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ SetPageUptodate(page);
+
+ return 0;
+}
+
+static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
+
+ ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_read_inline_data(inode, page, di_bh);
+out:
+ unlock_page(page);
+
+ brelse(di_bh);
+ return ret;
+}
+
static int ocfs2_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
int ret, unlock = 1;
@@ -222,7 +283,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
goto out;
}
- if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
+ if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
ret = AOP_TRUNCATED_PAGE;
goto out_meta_unlock;
}
@@ -232,7 +293,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
* might now be discovering a truncate that hit on another node.
* block_read_full_page->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
- * (generic_file_read, fault->nopage) are clever enough to check i_size
+ * (generic_file_read, vm_ops->fault) are clever enough to check i_size
* and notice that the page they just read isn't needed.
*
* XXX sys_readahead() seems to get that wrong?
@@ -252,7 +313,10 @@ static int ocfs2_readpage(struct file *file, struct page *page)
goto out_alloc;
}
- ret = block_read_full_page(page, ocfs2_get_block);
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ ret = ocfs2_readpage_inline(inode, page);
+ else
+ ret = block_read_full_page(page, ocfs2_get_block);
unlock = 0;
ocfs2_data_unlock(inode, 0);
@@ -301,12 +365,8 @@ int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
{
int ret;
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
ret = block_prepare_write(page, from, to, ocfs2_get_block);
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
-
return ret;
}
@@ -401,7 +461,9 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
down_read(&OCFS2_I(inode)->ip_alloc_sem);
}
- err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
+ err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
+ NULL);
if (!INODE_JOURNAL(inode)) {
up_read(&OCFS2_I(inode)->ip_alloc_sem);
@@ -415,7 +477,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
goto bail;
}
-
bail:
status = err ? 0 : p_blkno;
@@ -570,6 +631,13 @@ static ssize_t ocfs2_direct_IO(int rw,
mlog_entry_void();
+ /*
+ * Fallback to buffered I/O if we see an inode without
+ * extents.
+ */
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return 0;
+
if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
/*
* We get PR data locks even for O_DIRECT. This
@@ -834,18 +902,22 @@ struct ocfs2_write_ctxt {
struct ocfs2_cached_dealloc_ctxt w_dealloc;
};
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
int i;
- for(i = 0; i < wc->w_num_pages; i++) {
- if (wc->w_pages[i] == NULL)
- continue;
-
- unlock_page(wc->w_pages[i]);
- mark_page_accessed(wc->w_pages[i]);
- page_cache_release(wc->w_pages[i]);
+ for(i = 0; i < num_pages; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ mark_page_accessed(pages[i]);
+ page_cache_release(pages[i]);
+ }
}
+}
+
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+ ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
brelse(wc->w_di_bh);
kfree(wc);
@@ -855,6 +927,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
struct ocfs2_super *osb, loff_t pos,
unsigned len, struct buffer_head *di_bh)
{
+ u32 cend;
struct ocfs2_write_ctxt *wc;
wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
@@ -862,7 +935,8 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
return -ENOMEM;
wc->w_cpos = pos >> osb->s_clustersize_bits;
- wc->w_clen = ocfs2_clusters_for_bytes(osb->sb, len);
+ cend = (pos + len - 1) >> osb->s_clustersize_bits;
+ wc->w_clen = cend - wc->w_cpos + 1;
get_bh(di_bh);
wc->w_di_bh = di_bh;
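The new w_clen computation counts clusters by position rather than by byte length, because a write that straddles a cluster boundary touches one more cluster than its size alone suggests. A quick standalone check, assuming an illustrative 32 KiB cluster size:

#include <stdio.h>

int main(void)
{
    const unsigned clustersize_bits = 15;           /* 32 KiB, illustrative */
    unsigned long long pos = 30000, len = 4000;     /* crosses a boundary   */

    unsigned cpos = pos >> clustersize_bits;
    unsigned cend = (pos + len - 1) >> clustersize_bits;
    unsigned clen = cend - cpos + 1;

    /* Rounding the length alone would claim a single cluster. */
    unsigned by_len = (len + (1u << clustersize_bits) - 1) >> clustersize_bits;

    printf("write [%llu, %llu) spans %u cluster(s); length alone suggests %u\n",
           pos, pos + len, clen, by_len);
    return 0;
}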
@@ -928,18 +1002,11 @@ static void ocfs2_write_failure(struct inode *inode,
loff_t user_pos, unsigned user_len)
{
int i;
- unsigned from, to;
+ unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+ to = user_pos + user_len;
struct page *tmppage;
- ocfs2_zero_new_buffers(wc->w_target_page, user_pos, user_len);
-
- if (wc->w_large_pages) {
- from = wc->w_target_from;
- to = wc->w_target_to;
- } else {
- from = 0;
- to = PAGE_CACHE_SIZE;
- }
+ ocfs2_zero_new_buffers(wc->w_target_page, from, to);
for(i = 0; i < wc->w_num_pages; i++) {
tmppage = wc->w_pages[i];
@@ -989,9 +1056,6 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
map_from = cluster_start;
map_to = cluster_end;
}
-
- wc->w_target_from = map_from;
- wc->w_target_to = map_to;
} else {
/*
* If we haven't allocated the new page yet, we
@@ -1209,18 +1273,33 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
loff_t pos, unsigned len)
{
int ret, i;
+ loff_t cluster_off;
+ unsigned int local_len = len;
struct ocfs2_write_cluster_desc *desc;
+ struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
for (i = 0; i < wc->w_clen; i++) {
desc = &wc->w_desc[i];
+ /*
+ * We have to make sure that the total write passed in
+ * doesn't extend past a single cluster.
+ */
+ local_len = len;
+ cluster_off = pos & (osb->s_clustersize - 1);
+ if ((cluster_off + local_len) > osb->s_clustersize)
+ local_len = osb->s_clustersize - cluster_off;
+
ret = ocfs2_write_cluster(mapping, desc->c_phys,
desc->c_unwritten, data_ac, meta_ac,
- wc, desc->c_cpos, pos, len);
+ wc, desc->c_cpos, pos, local_len);
if (ret) {
mlog_errno(ret);
goto out;
}
+
+ len -= local_len;
+ pos += local_len;
}
ret = 0;
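The clamp added above keeps each ocfs2_write_cluster() call inside a single cluster, after which the remaining length and position advance by the clamped amount. A standalone sketch of that chunking loop (illustrative 32 KiB cluster size, not ocfs2 code):

#include <stdio.h>

int main(void)
{
    const unsigned long long clustersize = 32768;   /* illustrative */
    unsigned long long pos = 30000, len = 70000;

    while (len) {
        unsigned long long cluster_off = pos & (clustersize - 1);
        unsigned long long local_len = len;

        /* Never let one chunk extend past the end of its cluster. */
        if (cluster_off + local_len > clustersize)
            local_len = clustersize - cluster_off;

        printf("write %llu bytes at %llu (cluster %llu)\n",
               local_len, pos, pos / clustersize);

        len -= local_len;
        pos += local_len;
    }
    return 0;
}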
@@ -1353,6 +1432,160 @@ out:
return ret;
}
+static int ocfs2_write_begin_inline(struct address_space *mapping,
+ struct inode *inode,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct page *page;
+ handle_t *handle;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
+
+ page = find_or_create_page(mapping, 0, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+ /*
+ * If we don't set w_num_pages then this page won't get unlocked
+ * and freed on cleanup of the write context.
+ */
+ wc->w_pages[0] = wc->w_target_page = page;
+ wc->w_num_pages = 1;
+
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ ocfs2_commit_trans(osb, handle);
+
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
+ ocfs2_set_inode_data_inline(inode, di);
+
+ if (!PageUptodate(page)) {
+ ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
+ if (ret) {
+ ocfs2_commit_trans(osb, handle);
+
+ goto out;
+ }
+ }
+
+ wc->w_handle = handle;
+out:
+ return ret;
+}
+
+int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ if (new_size < le16_to_cpu(di->id2.i_data.id_count))
+ return 1;
+ return 0;
+}
+
+static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode, loff_t pos,
+ unsigned len, struct page *mmap_page,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret, written = 0;
+ loff_t end = pos + len;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
+ (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
+ oi->ip_dyn_features);
+
+ /*
+ * Handle inodes which already have inline data 1st.
+ */
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (mmap_page == NULL &&
+ ocfs2_size_fits_inline_data(wc->w_di_bh, end))
+ goto do_inline_write;
+
+ /*
+ * The write won't fit - we have to give this inode an
+ * inline extent list now.
+ */
+ ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
+ if (ret)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Check whether the inode can accept inline data.
+ */
+ if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
+ return 0;
+
+ /*
+ * Check whether the write can fit.
+ */
+ if (mmap_page || end > ocfs2_max_inline_data(inode->i_sb))
+ return 0;
+
+do_inline_write:
+ ret = ocfs2_write_begin_inline(mapping, inode, wc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * This signals to the caller that the data can be written
+ * inline.
+ */
+ written = 1;
+out:
+ return written ? written : ret;
+}
+
+/*
+ * This function only does anything for file systems which can't
+ * handle sparse files.
+ *
+ * What we want to do here is fill in any hole between the current end
+ * of allocation and the end of our write. That way the rest of the
+ * write path can treat it as an non-allocating write, which has no
+ * special case code for sparse/nonsparse files.
+ */
+static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
+ unsigned len,
+ struct ocfs2_write_ctxt *wc)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ loff_t newsize = pos + len;
+
+ if (ocfs2_sparse_alloc(osb))
+ return 0;
+
+ if (newsize <= i_size_read(inode))
+ return 0;
+
+ ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+ if (ret)
+ mlog_errno(ret);
+
+ return ret;
+}
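A small standalone sketch of the decision ocfs2_expand_nonsparse_inode() makes: nothing to do on sparse-capable volumes or when the write stays inside the current i_size; otherwise allocation has to be pushed out to the end of the write (illustrative values only):

#include <stdio.h>

/* Returns the size the inode must be extended to, or 0 if no work is needed. */
static unsigned long long expand_target(int sparse_capable,
                                        unsigned long long i_size,
                                        unsigned long long pos,
                                        unsigned long long len)
{
    unsigned long long newsize = pos + len;

    if (sparse_capable)
        return 0;               /* holes are fine, write path handles them */
    if (newsize <= i_size)
        return 0;               /* write fits in already-allocated space   */
    return newsize;             /* fill the hole up to the end of the write */
}

int main(void)
{
    printf("%llu\n", expand_target(0, 4096, 8192, 100));   /* -> 8292 */
    printf("%llu\n", expand_target(0, 4096, 1000, 100));   /* -> 0    */
    printf("%llu\n", expand_target(1, 4096, 8192, 100));   /* -> 0    */
    return 0;
}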
+
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
@@ -1374,6 +1607,25 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
return ret;
}
+ if (ocfs2_supports_inline_data(osb)) {
+ ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
+ mmap_page, wc);
+ if (ret == 1) {
+ ret = 0;
+ goto success;
+ }
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
&extents_to_split);
if (ret) {
@@ -1455,6 +1707,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
+success:
*pagep = wc->w_target_page;
*fsdata = wc;
return 0;
@@ -1522,6 +1775,31 @@ out_fail:
return ret;
}
+static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
+ unsigned len, unsigned *copied,
+ struct ocfs2_dinode *di,
+ struct ocfs2_write_ctxt *wc)
+{
+ void *kaddr;
+
+ if (unlikely(*copied < len)) {
+ if (!PageUptodate(wc->w_target_page)) {
+ *copied = 0;
+ return;
+ }
+ }
+
+ kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
+ memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ mlog(0, "Data written to inode at offset %llu. "
+ "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
+ (unsigned long long)pos, *copied,
+ le16_to_cpu(di->id2.i_data.id_count),
+ le16_to_cpu(di->i_dyn_features));
+}
+
int ocfs2_write_end_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
@@ -1535,6 +1813,11 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
handle_t *handle = wc->w_handle;
struct page *tmppage;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
+ goto out_write_size;
+ }
+
if (unlikely(copied < len)) {
if (!PageUptodate(wc->w_target_page))
copied = 0;
@@ -1572,6 +1855,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
block_commit_write(tmppage, from, to);
}
+out_write_size:
pos += copied;
if (pos > inode->i_size) {
i_size_write(inode, pos);
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 389579b..1135608 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -34,6 +34,8 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new);
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
unsigned from,
@@ -59,6 +61,10 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
struct page **pagep, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page);
+int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+ struct buffer_head *di_bh);
+int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
+
/* all ocfs2_dio_end_io()'s fault */
#define ocfs2_iocb_is_rw_locked(iocb) \
test_bit(0, (unsigned long *)&iocb->private)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 2bd7f78..f14b541 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,8 +216,7 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
wait_for_completion(&wc->wc_io_complete);
}
-static int o2hb_bio_end_io(struct bio *bio,
- unsigned int bytes_done,
+static void o2hb_bio_end_io(struct bio *bio,
int error)
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
@@ -227,12 +226,8 @@ static int o2hb_bio_end_io(struct bio *bio,
wc->wc_error = error;
}
- if (bio->bi_size)
- return 1;
-
o2hb_bio_wait_dec(wc, 1);
bio_put(bio);
- return 0;
}
/* Setup a Bio to cover I/O against num_slots slots starting at
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index e9e042b..a4882c8 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -143,7 +143,7 @@ static struct kobj_type mlog_ktype = {
};
static struct kset mlog_kset = {
- .kobj = {.name = "logmask", .ktype = &mlog_ktype},
+ .kobj = {.ktype = &mlog_ktype},
};
int mlog_sys_init(struct kset *o2cb_subsys)
@@ -156,6 +156,7 @@ int mlog_sys_init(struct kset *o2cb_subsys)
}
mlog_attr_ptrs[i] = NULL;
+ kobject_set_name(&mlog_kset.kobj, "logmask");
kobj_set_kset_s(&mlog_kset, *o2cb_subsys);
return kset_register(&mlog_kset);
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index f0bdfd9..685c180 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -854,17 +854,25 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
ssize_t ret;
-
- mutex_lock(&sc->sc_send_lock);
- ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
- virt_to_page(kmalloced_virt),
- (long)kmalloced_virt & ~PAGE_MASK,
- size, MSG_DONTWAIT);
- mutex_unlock(&sc->sc_send_lock);
- if (ret != size) {
+ while (1) {
+ mutex_lock(&sc->sc_send_lock);
+ ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
+ virt_to_page(kmalloced_virt),
+ (long)kmalloced_virt & ~PAGE_MASK,
+ size, MSG_DONTWAIT);
+ mutex_unlock(&sc->sc_send_lock);
+ if (ret == size)
+ break;
+ if (ret == (ssize_t)-EAGAIN) {
+ mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
+ " returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
+ cond_resched();
+ continue;
+ }
mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
+ break;
}
}
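The retry loop above treats -EAGAIN from sendpage() as a transient condition rather than a failure. A userspace analogue of the same pattern, using a non-blocking send() over a socketpair (illustrative only; sched_yield() stands in for cond_resched()):

#include <errno.h>
#include <sched.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Keep retrying a non-blocking send until all of buf has gone out. */
static int send_all(int fd, const char *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t ret = send(fd, buf + done, len - done, MSG_DONTWAIT);

        if (ret >= 0) {
            done += (size_t)ret;
            continue;
        }
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            sched_yield();      /* give the reader a chance to drain */
            continue;
        }
        return -1;              /* real error: report it to the caller */
    }
    return 0;
}

int main(void)
{
    int sv[2];
    char msg[256], back[256];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
        return 1;
    memset(msg, 'x', sizeof(msg));
    if (send_all(sv[0], msg, sizeof(msg)))
        return 1;
    read(sv[1], back, sizeof(back));
    close(sv[0]);
    close(sv[1]);
    return 0;
}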
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 0d5fdde..7453b70 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -55,10 +55,16 @@
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
+#include "super.h"
#include "uptodate.h"
#include "buffer_head_io.h"
+#define NAMEI_RA_CHUNKS 2
+#define NAMEI_RA_BLOCKS 4
+#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
static unsigned char ocfs2_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
@@ -66,12 +72,614 @@ static unsigned char ocfs2_filetype_table[] = {
static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
+ unsigned int blocks_wanted,
struct buffer_head **new_de_bh);
+static int ocfs2_do_extend_dir(struct super_block *sb,
+ handle_t *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh);
+
/*
- * ocfs2_readdir()
+ * bh passed here can be an inode block or a dir data block, depending
+ * on the inode inline data flag.
+ */
+static int ocfs2_check_dir_entry(struct inode * dir,
+ struct ocfs2_dir_entry * de,
+ struct buffer_head * bh,
+ unsigned long offset)
+{
+ const char *error_msg = NULL;
+ const int rlen = le16_to_cpu(de->rec_len);
+
+ if (rlen < OCFS2_DIR_REC_LEN(1))
+ error_msg = "rec_len is smaller than minimal";
+ else if (rlen % 4 != 0)
+ error_msg = "rec_len % 4 != 0";
+ else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ error_msg = "rec_len is too small for name_len";
+ else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ error_msg = "directory entry across blocks";
+
+ if (error_msg != NULL)
+ mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
+ "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
+ offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
+ de->name_len);
+ return error_msg == NULL ? 1 : 0;
+}
+
+static inline int ocfs2_match(int len,
+ const char * const name,
+ struct ocfs2_dir_entry *de)
+{
+ if (len != de->name_len)
+ return 0;
+ if (!de->inode)
+ return 0;
+ return !memcmp(name, de->name, len);
+}
+
+/*
+ * Returns 0 if not found, -1 on failure, and 1 on success
+ */
+static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+ struct inode *dir,
+ const char *name, int namelen,
+ unsigned long offset,
+ char *first_de,
+ unsigned int bytes,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct ocfs2_dir_entry *de;
+ char *dlimit, *de_buf;
+ int de_len;
+ int ret = 0;
+
+ mlog_entry_void();
+
+ de_buf = first_de;
+ dlimit = de_buf + bytes;
+
+ while (de_buf < dlimit) {
+ /* this code is executed quadratically often */
+ /* do minimal checking `by hand' */
+
+ de = (struct ocfs2_dir_entry *) de_buf;
+
+ if (de_buf + namelen <= dlimit &&
+ ocfs2_match(namelen, name, de)) {
+ /* found a match - just to be sure, do a full check */
+ if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+ ret = -1;
+ goto bail;
+ }
+ *res_dir = de;
+ ret = 1;
+ goto bail;
+ }
+
+ /* prevent looping on a bad block */
+ de_len = le16_to_cpu(de->rec_len);
+ if (de_len <= 0) {
+ ret = -1;
+ goto bail;
+ }
+
+ de_buf += de_len;
+ offset += de_len;
+ }
+
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+static struct buffer_head *ocfs2_find_entry_id(const char *name,
+ int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ int ret, found;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+
+ ret = ocfs2_read_block(OCFS2_SB(dir->i_sb), OCFS2_I(dir)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, dir);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0,
+ data->id_data, i_size_read(dir), res_dir);
+ if (found == 1)
+ return di_bh;
+
+ brelse(di_bh);
+out:
+ return NULL;
+}
+
+struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct super_block *sb;
+ struct buffer_head *bh_use[NAMEI_RA_SIZE];
+ struct buffer_head *bh, *ret = NULL;
+ unsigned long start, block, b;
+ int ra_max = 0; /* Number of bh's in the readahead
+ buffer, bh_use[] */
+ int ra_ptr = 0; /* Current index into readahead
+ buffer */
+ int num = 0;
+ int nblocks, i, err;
+
+ mlog_entry_void();
+
+ sb = dir->i_sb;
+
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ start = OCFS2_I(dir)->ip_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+
+restart:
+ do {
+ /*
+ * We deal with the read-ahead logic here.
+ */
+ if (ra_ptr >= ra_max) {
+ /* Refill the readahead buffer */
+ ra_ptr = 0;
+ b = block;
+ for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
+ /*
+ * Terminate if we reach the end of the
+ * directory and must wrap, or if our
+ * search has finished at this block.
+ */
+ if (b >= nblocks || (num && block == start)) {
+ bh_use[ra_max] = NULL;
+ break;
+ }
+ num++;
+
+ bh = ocfs2_bread(dir, b++, &err, 1);
+ bh_use[ra_max] = bh;
+ }
+ }
+ if ((bh = bh_use[ra_ptr++]) == NULL)
+ goto next;
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ /* read error, skip block & hope for the best */
+ ocfs2_error(dir->i_sb, "reading directory %llu, "
+ "offset %lu\n",
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ block);
+ brelse(bh);
+ goto next;
+ }
+ i = ocfs2_search_dirblock(bh, dir, name, namelen,
+ block << sb->s_blocksize_bits,
+ bh->b_data, sb->s_blocksize,
+ res_dir);
+ if (i == 1) {
+ OCFS2_I(dir)->ip_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+ brelse(bh);
+ if (i < 0)
+ goto cleanup_and_exit;
+ }
+ next:
+ if (++block >= nblocks)
+ block = 0;
+ } while (block != start);
+
+ /*
+ * If the directory has grown while we were searching, then
+ * search the last part of the directory before giving up.
+ */
+ block = nblocks;
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ if (block < nblocks) {
+ start = 0;
+ goto restart;
+ }
+
+cleanup_and_exit:
+ /* Clean up the read-ahead blocks */
+ for (; ra_ptr < ra_max; ra_ptr++)
+ brelse(bh_use[ra_ptr]);
+
+ mlog_exit_ptr(ret);
+ return ret;
+}
+
+/*
+ * Try to find an entry of the provided name within 'dir'.
*
+ * If nothing was found, NULL is returned. Otherwise, a buffer_head
+ * and pointer to the dir entry are passed back.
+ *
+ * Caller can NOT assume anything about the contents of the
+ * buffer_head - it is passed back only so that it can be passed into
+ * any one of the manipulation functions (add entry, delete entry,
+ * etc). As an example, bh in the extent directory case is a data
+ * block, in the inline-data case it actually points to an inode.
*/
-int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+struct buffer_head *ocfs2_find_entry(const char *name, int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ *res_dir = NULL;
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_find_entry_id(name, namelen, dir, res_dir);
+
+ return ocfs2_find_entry_el(name, namelen, dir, res_dir);
+}
+
+/*
+ * Update inode number and type of a previously found directory entry.
+ */
+int ocfs2_update_entry(struct inode *dir, handle_t *handle,
+ struct buffer_head *de_bh, struct ocfs2_dir_entry *de,
+ struct inode *new_entry_inode)
+{
+ int ret;
+
+ /*
+ * The same code works fine for both inline-data and extent
+ * based directories, so no need to split this up.
+ */
+
+ ret = ocfs2_journal_access(handle, dir, de_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
+ ocfs2_set_de_type(de, new_entry_inode->i_mode);
+
+ ocfs2_journal_dirty(handle, de_bh);
+
+out:
+ return ret;
+}
+
+static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh, char *first_de,
+ unsigned int bytes)
+{
+ struct ocfs2_dir_entry *de, *pde;
+ int i, status = -ENOENT;
+
+ mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
+
+ i = 0;
+ pde = NULL;
+ de = (struct ocfs2_dir_entry *) first_de;
+ while (i < bytes) {
+ if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (de == de_del) {
+ status = ocfs2_journal_access(handle, dir, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (pde)
+ pde->rec_len =
+ cpu_to_le16(le16_to_cpu(pde->rec_len) +
+ le16_to_cpu(de->rec_len));
+ else
+ de->inode = 0;
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, bh);
+ goto bail;
+ }
+ i += le16_to_cpu(de->rec_len);
+ pde = de;
+ de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
+ }
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static inline int ocfs2_delete_entry_id(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+
+ ret = ocfs2_read_block(OCFS2_SB(dir->i_sb), OCFS2_I(dir)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, dir);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data,
+ i_size_read(dir));
+
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+static inline int ocfs2_delete_entry_el(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data,
+ bh->b_size);
+}
+
+/*
+ * ocfs2_delete_entry deletes a directory entry by merging it with the
+ * previous entry
+ */
+int ocfs2_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_delete_entry_id(handle, dir, de_del, bh);
+
+ return ocfs2_delete_entry_el(handle, dir, de_del, bh);
+}
+
+/*
+ * Check whether 'de' has enough room to hold an entry of
+ * 'new_rec_len' bytes.
+ */
+static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de,
+ unsigned int new_rec_len)
+{
+ unsigned int de_really_used;
+
+ /* Check whether this is an empty record with enough space */
+ if (le64_to_cpu(de->inode) == 0 &&
+ le16_to_cpu(de->rec_len) >= new_rec_len)
+ return 1;
+
+ /*
+ * Record might have free space at the end which we can
+ * use.
+ */
+ de_really_used = OCFS2_DIR_REC_LEN(de->name_len);
+ if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len))
+ return 1;
+
+ return 0;
+}
+
+/* we don't always have a dentry for what we want to add, so people
+ * like orphan dir can call this instead.
+ *
+ * If you pass me insert_bh, I'll skip the search of the other dir
+ * blocks and put the record in there.
+ */
+int __ocfs2_add_entry(handle_t *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ unsigned long offset;
+ unsigned short rec_len;
+ struct ocfs2_dir_entry *de, *de1;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
+ struct super_block *sb = dir->i_sb;
+ int retval, status;
+ unsigned int size = sb->s_blocksize;
+ char *data_start = insert_bh->b_data;
+
+ mlog_entry_void();
+
+ if (!namelen)
+ return -EINVAL;
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ data_start = di->id2.i_data.id_data;
+ size = i_size_read(dir);
+
+ BUG_ON(insert_bh != parent_fe_bh);
+ }
+
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
+ offset = 0;
+ de = (struct ocfs2_dir_entry *) data_start;
+ while (1) {
+ BUG_ON((char *)de >= (size + data_start));
+
+ /* These checks should've already been passed by the
+ * prepare function, but I guess we can leave them
+ * here anyway. */
+ if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
+ retval = -ENOENT;
+ goto bail;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ retval = -EEXIST;
+ goto bail;
+ }
+
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
+ if (retval < 0) {
+ mlog_errno(retval);
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, dir, insert_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ /* By now the buffer is marked for journaling */
+ offset += le16_to_cpu(de->rec_len);
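+ /* If the slot holds a live entry, split it: trim 'de'
+ * to its used length and carve the new record out of
+ * the leftover space. */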
+ if (le64_to_cpu(de->inode)) {
+ de1 = (struct ocfs2_dir_entry *)((char *) de +
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de1->rec_len =
+ cpu_to_le16(le16_to_cpu(de->rec_len) -
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ de = de1;
+ }
+ de->file_type = OCFS2_FT_UNKNOWN;
+ if (blkno) {
+ de->inode = cpu_to_le64(blkno);
+ ocfs2_set_de_type(de, inode->i_mode);
+ } else
+ de->inode = 0;
+ de->name_len = namelen;
+ memcpy(de->name, name, namelen);
+
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, insert_bh);
+ retval = 0;
+ goto bail;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
+ }
+
+ /* when you think about it, the assert above should prevent us
+ * from ever getting here. */
+ retval = -ENOSPC;
+bail:
+
+ mlog_exit(retval);
+ return retval;
+}
+
+static int ocfs2_dir_foreach_blk_id(struct inode *inode,
+ unsigned long *f_version,
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
+{
+ int ret, i, filldir_ret;
+ unsigned long offset = *f_pos;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_inline_data *data;
+ struct ocfs2_dir_entry *de;
+
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), OCFS2_I(inode)->ip_blkno,
+ &di_bh, OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog(ML_ERROR, "Unable to read inode block for dir %llu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ data = &di->id2.i_data;
+
+ while (*f_pos < i_size_read(inode)) {
+revalidate:
+ /* If the dir block has changed since the last call to
+ * readdir(2), then we might be pointing to an invalid
+ * dirent right now. Scan from the start of the block
+ * to make sure. */
+ if (*f_version != inode->i_version) {
+ for (i = 0; i < i_size_read(inode) && i < offset; ) {
+ de = (struct ocfs2_dir_entry *)
+ (data->id_data + i);
+ /* It's too expensive to do a full
+ * dirent test each time round this
+ * loop, but we do have to test at
+ * least that it is non-zero. A
+ * failure will be detected in the
+ * dirent test below. */
+ if (le16_to_cpu(de->rec_len) <
+ OCFS2_DIR_REC_LEN(1))
+ break;
+ i += le16_to_cpu(de->rec_len);
+ }
+ *f_pos = offset = i;
+ *f_version = inode->i_version;
+ }
+
+ de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos);
+ if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) {
+ /* On error, skip the f_pos to the end. */
+ *f_pos = i_size_read(inode);
+ goto out;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ if (le64_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ unsigned long version = *f_version;
+ unsigned char d_type = DT_UNKNOWN;
+
+ if (de->file_type < OCFS2_FT_MAX)
+ d_type = ocfs2_filetype_table[de->file_type];
+
+ filldir_ret = filldir(priv, de->name,
+ de->name_len,
+ *f_pos,
+ le64_to_cpu(de->inode),
+ d_type);
+ if (filldir_ret) {
+ if (filldir_err)
+ *filldir_err = filldir_ret;
+ break;
+ }
+ if (version != *f_version)
+ goto revalidate;
+ }
+ *f_pos += le16_to_cpu(de->rec_len);
+ }
+
+out:
+ brelse(di_bh);
+
+ return 0;
+}
+
+static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ unsigned long *f_version,
+ loff_t *f_pos, void *priv,
+ filldir_t filldir, int *filldir_err)
{
int error = 0;
unsigned long offset, blk, last_ra_blk = 0;
@@ -79,45 +687,23 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct buffer_head * bh, * tmp;
struct ocfs2_dir_entry * de;
int err;
- struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block * sb = inode->i_sb;
unsigned int ra_sectors = 16;
- int lock_level = 0;
-
- mlog_entry("dirino=%llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
stored = 0;
bh = NULL;
- error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
- if (lock_level && error >= 0) {
- /* We release EX lock which used to update atime
- * and get PR lock again to reduce contention
- * on commonly accessed directories. */
- ocfs2_meta_unlock(inode, 1);
- lock_level = 0;
- error = ocfs2_meta_lock(inode, NULL, 0);
- }
- if (error < 0) {
- if (error != -ENOENT)
- mlog_errno(error);
- /* we haven't got any yet, so propagate the error. */
- stored = error;
- goto bail_nolock;
- }
+ offset = (*f_pos) & (sb->s_blocksize - 1);
- offset = filp->f_pos & (sb->s_blocksize - 1);
-
- while (!error && !stored && filp->f_pos < i_size_read(inode)) {
- blk = (filp->f_pos) >> sb->s_blocksize_bits;
+ while (!error && !stored && *f_pos < i_size_read(inode)) {
+ blk = (*f_pos) >> sb->s_blocksize_bits;
bh = ocfs2_bread(inode, blk, &err, 0);
if (!bh) {
mlog(ML_ERROR,
"directory #%llu contains a hole at offset %lld\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
- filp->f_pos);
- filp->f_pos += sb->s_blocksize - offset;
+ *f_pos);
+ *f_pos += sb->s_blocksize - offset;
continue;
}
@@ -143,7 +729,7 @@ revalidate:
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (filp->f_version != inode->i_version) {
+ if (*f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ocfs2_dir_entry *) (bh->b_data + i);
/* It's too expensive to do a full
@@ -158,21 +744,20 @@ revalidate:
i += le16_to_cpu(de->rec_len);
}
offset = i;
- filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+ *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1))
| offset;
- filp->f_version = inode->i_version;
+ *f_version = inode->i_version;
}
- while (!error && filp->f_pos < i_size_read(inode)
+ while (!error && *f_pos < i_size_read(inode)
&& offset < sb->s_blocksize) {
de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
/* On error, skip the f_pos to the
next block. */
- filp->f_pos = (filp->f_pos |
- (sb->s_blocksize - 1)) + 1;
+ *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1;
brelse(bh);
- goto bail;
+ goto out;
}
offset += le16_to_cpu(de->rec_len);
if (le64_to_cpu(de->inode)) {
@@ -183,36 +768,109 @@ revalidate:
* not the directory has been modified
* during the copy operation.
*/
- unsigned long version = filp->f_version;
+ unsigned long version = *f_version;
unsigned char d_type = DT_UNKNOWN;
if (de->file_type < OCFS2_FT_MAX)
d_type = ocfs2_filetype_table[de->file_type];
- error = filldir(dirent, de->name,
+ error = filldir(priv, de->name,
de->name_len,
- filp->f_pos,
- ino_from_blkno(sb, le64_to_cpu(de->inode)),
+ *f_pos,
+ le64_to_cpu(de->inode),
d_type);
- if (error)
+ if (error) {
+ if (filldir_err)
+ *filldir_err = error;
break;
- if (version != filp->f_version)
+ }
+ if (version != *f_version)
goto revalidate;
stored ++;
}
- filp->f_pos += le16_to_cpu(de->rec_len);
+ *f_pos += le16_to_cpu(de->rec_len);
}
offset = 0;
brelse(bh);
}
stored = 0;
-bail:
+out:
+ return stored;
+}
+
+static int ocfs2_dir_foreach_blk(struct inode *inode, unsigned long *f_version,
+ loff_t *f_pos, void *priv, filldir_t filldir,
+ int *filldir_err)
+{
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv,
+ filldir, filldir_err);
+
+ return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir,
+ filldir_err);
+}
+
+/*
+ * This is intended to be called from inside other kernel functions,
+ * so we fake some arguments.
+ */
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir)
+{
+ int ret = 0, filldir_err = 0;
+ unsigned long version = inode->i_version;
+
+ while (*f_pos < i_size_read(inode)) {
+ ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv,
+ filldir, &filldir_err);
+ if (ret || filldir_err)
+ break;
+ }
+
+ if (ret > 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+/*
+ * ocfs2_readdir()
+ *
+ */
+int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+ int error = 0;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int lock_level = 0;
+
+ mlog_entry("dirino=%llu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+ if (lock_level && error >= 0) {
+ /* We release EX lock which used to update atime
+ * and get PR lock again to reduce contention
+ * on commonly accessed directories. */
+ ocfs2_meta_unlock(inode, 1);
+ lock_level = 0;
+ error = ocfs2_meta_lock(inode, NULL, 0);
+ }
+ if (error < 0) {
+ if (error != -ENOENT)
+ mlog_errno(error);
+ /* we haven't got any yet, so propagate the error. */
+ goto bail_nolock;
+ }
+
+ error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos,
+ dirent, filldir, NULL);
+
ocfs2_meta_unlock(inode, lock_level);
bail_nolock:
- mlog_exit(stored);
+ mlog_exit(error);
- return stored;
+ return error;
}
/*
@@ -252,6 +910,23 @@ leave:
return status;
}
+/*
+ * Convenience function for callers which just want the block number
+ * mapped to a name and don't require the full dirent info, etc.
+ */
+int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
+ int namelen, u64 *blkno)
+{
+ int ret;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_dir_entry *dirent = NULL;
+
+ ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &bh, &dirent);
+ brelse(bh);
+
+ return ret;
+}
+
/* Check for a name within a directory.
*
* Return 0 if the name does not exist
@@ -284,77 +959,414 @@ bail:
return ret;
}
+struct ocfs2_empty_dir_priv {
+ unsigned seen_dot;
+ unsigned seen_dot_dot;
+ unsigned seen_other;
+};
+static int ocfs2_empty_dir_filldir(void *priv, const char *name, int name_len,
+ loff_t pos, u64 ino, unsigned type)
+{
+ struct ocfs2_empty_dir_priv *p = priv;
+
+ /*
+ * Check the positions of "." and ".." records to be sure
+ * they're in the correct place.
+ */
+ if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) {
+ p->seen_dot = 1;
+ return 0;
+ }
+
+ if (name_len == 2 && !strncmp("..", name, 2) &&
+ pos == OCFS2_DIR_REC_LEN(1)) {
+ p->seen_dot_dot = 1;
+ return 0;
+ }
+
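+ /* Any other name means the directory is not empty.
+ * Returning nonzero stops the directory walk early. */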
+ p->seen_other = 1;
+ return 1;
+}
/*
* routine to check that the specified directory is empty (for rmdir)
+ *
+ * Returns 1 if dir is empty, zero otherwise.
*/
int ocfs2_empty_dir(struct inode *inode)
{
- unsigned long offset;
- struct buffer_head * bh;
- struct ocfs2_dir_entry * de, * de1;
- struct super_block * sb;
- int err;
+ int ret;
+ loff_t start = 0;
+ struct ocfs2_empty_dir_priv priv;
+
+ memset(&priv, 0, sizeof(priv));
+
+ ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir);
+ if (ret)
+ mlog_errno(ret);
- sb = inode->i_sb;
- if ((i_size_read(inode) <
- (OCFS2_DIR_REC_LEN(1) + OCFS2_DIR_REC_LEN(2))) ||
- !(bh = ocfs2_bread(inode, 0, &err, 0))) {
- mlog(ML_ERROR, "bad directory (dir #%llu) - no data block\n",
+ if (!priv.seen_dot || !priv.seen_dot_dot) {
+ mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
+ /*
+ * XXX: Is it really safe to allow an unlink to continue?
+ */
return 1;
}
- de = (struct ocfs2_dir_entry *) bh->b_data;
- de1 = (struct ocfs2_dir_entry *)
- ((char *)de + le16_to_cpu(de->rec_len));
- if ((le64_to_cpu(de->inode) != OCFS2_I(inode)->ip_blkno) ||
- !le64_to_cpu(de1->inode) ||
- strcmp(".", de->name) ||
- strcmp("..", de1->name)) {
- mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
- brelse(bh);
- return 1;
+ return !priv.seen_other;
+}
+
+static void ocfs2_fill_initial_dirents(struct inode *inode,
+ struct inode *parent,
+ char *start, unsigned int size)
+{
+ struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start;
+
+ de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
+ de->name_len = 1;
+ de->rec_len =
+ cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ strcpy(de->name, ".");
+ ocfs2_set_de_type(de, S_IFDIR);
+
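+ /* ".." takes all of the remaining space so that later
+ * entries can be carved out of its free area. */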
+ de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
+ de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
+ de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
+ de->name_len = 2;
+ strcpy(de->name, "..");
+ ocfs2_set_de_type(de, S_IFDIR);
+}
+
+/*
+ * This works together with code in ocfs2_mknod_locked() which sets
+ * the inline-data flag and initializes the inline-data section.
+ */
+static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inline_data *data = &di->id2.i_data;
+ unsigned int size = le16_to_cpu(data->id_count);
+
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
- offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
- de = (struct ocfs2_dir_entry *)((char *)de1 + le16_to_cpu(de1->rec_len));
- while (offset < i_size_read(inode) ) {
- if (!bh || (void *)de >= (void *)(bh->b_data + sb->s_blocksize)) {
- brelse(bh);
- bh = ocfs2_bread(inode,
- offset >> sb->s_blocksize_bits, &err, 0);
- if (!bh) {
- mlog(ML_ERROR, "dir %llu has a hole at %lu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, offset);
- offset += sb->s_blocksize;
- continue;
- }
- de = (struct ocfs2_dir_entry *) bh->b_data;
- }
- if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
- brelse(bh);
- return 1;
+
+ ocfs2_fill_initial_dirents(inode, parent, data->id_data, size);
+
+ ret = ocfs2_journal_dirty(handle, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ i_size_write(inode, size);
+ inode->i_nlink = 2;
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+
+ ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
+static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac)
+{
+ int status;
+ struct buffer_head *new_bh = NULL;
+
+ mlog_entry_void();
+
+ status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
+ data_ac, NULL, &new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_set_new_buffer_uptodate(inode, new_bh);
+
+ status = ocfs2_journal_access(handle, inode, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ memset(new_bh->b_data, 0, osb->sb->s_blocksize);
+
+ ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data,
+ osb->sb->s_blocksize);
+
+ status = ocfs2_journal_dirty(handle, new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ inode->i_nlink = 2;
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ if (new_bh)
+ brelse(new_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac)
+{
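+ /* Extent-based directories need a data allocation context for
+ * their first block; inline-data directories store the initial
+ * entries directly in the inode. */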
+ BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL);
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh);
+
+ return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh,
+ data_ac);
+}
+
+static void ocfs2_expand_last_dirent(char *start, unsigned int old_size,
+ unsigned int new_size)
+{
+ struct ocfs2_dir_entry *de;
+ struct ocfs2_dir_entry *prev_de;
+ char *de_buf, *limit;
+ unsigned int bytes = new_size - old_size;
+
+ limit = start + old_size;
+ de_buf = start;
+ de = (struct ocfs2_dir_entry *)de_buf;
+ do {
+ prev_de = de;
+ de_buf += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *)de_buf;
+ } while (de_buf < limit);
+
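+ /* Stretch the last record so it covers the space added by the
+ * expansion. */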
+ le16_add_cpu(&prev_de->rec_len, bytes);
+}
+
+/*
+ * We allocate enough clusters to fulfill "blocks_wanted", but set
+ * i_size to exactly one block. Ocfs2_extend_dir() will handle the
+ * rest automatically for us.
+ *
+ * *first_block_bh is a pointer to the 1st data block allocated to the
+ * directory.
+ */
+static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
+ unsigned int blocks_wanted,
+ struct buffer_head **first_block_bh)
+{
+ int ret, credits = OCFS2_INLINE_TO_EXTENTS_CREDITS;
+ u32 alloc, bit_off, len;
+ struct super_block *sb = dir->i_sb;
+ u64 blkno, bytes = blocks_wanted << sb->s_blocksize_bits;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct ocfs2_inode_info *oi = OCFS2_I(dir);
+ struct ocfs2_alloc_context *data_ac;
+ struct buffer_head *dirdata_bh = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ handle_t *handle;
+
+ alloc = ocfs2_clusters_for_bytes(sb, bytes);
+
+ /*
+ * We should never need more than 2 clusters for this -
+ * maximum dirent size is far less than one block. In fact,
+ * the only time we'd need more than one cluster is if
+ * blocksize == clustersize and the dirent won't fit in the
+ * extra space that the expansion to a single block gives. As
+ * of today, that only happens on 4k/4k file systems.
+ */
+ BUG_ON(alloc > 2);
+
+ ret = ocfs2_reserve_clusters(osb, alloc, &data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_write(&oi->ip_alloc_sem);
+
+ /*
+ * Prepare for worst case allocation scenario of two separate
+ * extents.
+ */
+ if (alloc == 2)
+ credits += OCFS2_SUBALLOC_ALLOC;
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_sem;
+ }
+
+ /*
+ * Try to claim as many clusters as the bitmap can give, though
+ * if we only get one now, that's enough to continue. The rest
+ * will be claimed after the conversion to extents.
+ */
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Operations are carefully ordered so that we set up the new
+ * data block first. The conversion from inline data to
+ * extents follows.
+ */
+ blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
+ dirdata_bh = sb_getblk(sb, blkno);
+ if (!dirdata_bh) {
+ ret = -EIO;
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ocfs2_set_new_buffer_uptodate(dir, dirdata_bh);
+
+ ret = ocfs2_journal_access(handle, dir, dirdata_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
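+ /* Copy the inline dirents into the new data block, zero the
+ * unused tail and stretch the last record over the added space. */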
+ memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir));
+ memset(dirdata_bh->b_data + i_size_read(dir), 0,
+ sb->s_blocksize - i_size_read(dir));
+ ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir),
+ sb->s_blocksize);
+
+ ret = ocfs2_journal_dirty(handle, dirdata_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Set extent, i_size, etc on the directory. After this, the
+ * inode should contain the same exact dirents as before and
+ * be fully accessible from system calls.
+ *
+ * We let the later dirent insert modify c/mtime - to the user
+ * the data hasn't changed.
+ */
+ ret = ocfs2_journal_access(handle, dir, di_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ocfs2_dinode_new_extent_list(dir, di);
+
+ i_size_write(dir, sb->s_blocksize);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+
+ di->i_size = cpu_to_le64(sb->s_blocksize);
+ di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
+ dir->i_blocks = ocfs2_inode_sector_count(dir);
+
+ /*
+ * This should never fail as our extent list is empty and all
+ * related blocks have been journaled already.
+ */
+ ret = ocfs2_insert_extent(osb, handle, dir, di_bh, 0, blkno, len, 0,
+ NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_dirty(handle, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * We asked for two clusters, but only got one in the 1st
+ * pass. Claim the 2nd cluster as a separate extent.
+ */
+ if (alloc > len) {
+ ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
+ &len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
}
- if (le64_to_cpu(de->inode)) {
- brelse(bh);
- return 0;
+ blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
+
+ ret = ocfs2_insert_extent(osb, handle, dir, di_bh, 1, blkno,
+ len, 0, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
- offset += le16_to_cpu(de->rec_len);
- de = (struct ocfs2_dir_entry *)
- ((char *)de + le16_to_cpu(de->rec_len));
}
- brelse(bh);
- return 1;
+
+ *first_block_bh = dirdata_bh;
+ dirdata_bh = NULL;
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out_sem:
+ up_write(&oi->ip_alloc_sem);
+
+out:
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+
+ brelse(dirdata_bh);
+
+ return ret;
}
/* returns a bh of the 1st new block in the allocation. */
-int ocfs2_do_extend_dir(struct super_block *sb,
- handle_t *handle,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- struct ocfs2_alloc_context *data_ac,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **new_bh)
+static int ocfs2_do_extend_dir(struct super_block *sb,
+ handle_t *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh)
{
int status;
int extend;
@@ -396,10 +1408,18 @@ bail:
return status;
}
-/* assumes you already have a cluster lock on the directory. */
+/*
+ * Assumes you already have a cluster lock on the directory.
+ *
+ * 'blocks_wanted' is only used if we have an inline directory which
+ * is to be turned into an extent based one. The size of the dirent to
+ * insert might be larger than the space gained by growing to just one
+ * block, so we may have to grow the inode by two blocks in that case.
+ */
static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
+ unsigned int blocks_wanted,
struct buffer_head **new_de_bh)
{
int status = 0;
@@ -415,6 +1435,38 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
mlog_entry_void();
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ status = ocfs2_expand_inline_dir(dir, parent_fe_bh,
+ blocks_wanted, &new_bh);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (blocks_wanted == 1) {
+ /*
+ * If the new dirent will fit inside the space
+ * created by pushing out to one block, then
+ * we can complete the operation
+ * here. Otherwise we have to expand i_size
+ * and format the 2nd block below.
+ */
+ BUG_ON(new_bh == NULL);
+ goto bail_bh;
+ }
+
+ /*
+ * Get rid of 'new_bh' - we want to format the 2nd
+ * data block and return that instead.
+ */
+ brelse(new_bh);
+ new_bh = NULL;
+
+ dir_i_size = i_size_read(dir);
+ credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
+ goto do_extend;
+ }
+
dir_i_size = i_size_read(dir);
mlog(0, "extending dir %llu (i_size = %lld)\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
@@ -452,6 +1504,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
}
+do_extend:
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
@@ -497,6 +1550,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
goto bail;
}
+bail_bh:
*new_de_bh = new_bh;
get_bh(*new_de_bh);
bail:
@@ -517,41 +1571,71 @@ bail:
return status;
}
-/*
- * Search the dir for a good spot, extending it if necessary. The
- * block containing an appropriate record is returned in ret_de_bh.
- */
-int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- const char *name,
- int namelen,
- struct buffer_head **ret_de_bh)
+static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ const char *name, int namelen,
+ struct buffer_head **ret_de_bh,
+ unsigned int *blocks_wanted)
{
- unsigned long offset;
- struct buffer_head * bh = NULL;
- unsigned short rec_len;
- struct ocfs2_dinode *fe;
- struct ocfs2_dir_entry *de;
- struct super_block *sb;
- int status;
+ int ret;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_dir_entry *de, *last_de = NULL;
+ char *de_buf, *limit;
+ unsigned long offset = 0;
+ unsigned int rec_len, new_rec_len;
+
+ de_buf = di->id2.i_data.id_data;
+ limit = de_buf + i_size_read(dir);
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
- mlog_entry_void();
+ while (de_buf < limit) {
+ de = (struct ocfs2_dir_entry *)de_buf;
- mlog(0, "getting ready to insert namelen %d into dir %llu\n",
- namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ ret = -EEXIST;
+ goto out;
+ }
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
+ /* Ok, we found a spot. Return this bh and let
+ * the caller actually fill it in. */
+ *ret_de_bh = di_bh;
+ get_bh(*ret_de_bh);
+ ret = 0;
+ goto out;
+ }
- BUG_ON(!S_ISDIR(dir->i_mode));
- fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
- BUG_ON(le64_to_cpu(fe->i_size) != i_size_read(dir));
+ last_de = de;
+ de_buf += le16_to_cpu(de->rec_len);
+ offset += le16_to_cpu(de->rec_len);
+ }
- sb = dir->i_sb;
+ /*
+ * We're going to require expansion of the directory - figure
+ * out how many blocks we'll need so that a place for the
+ * dirent can be found.
+ */
+ *blocks_wanted = 1;
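+ /*
+ * 'new_rec_len' is the space the last record would cover once
+ * the inline area is pushed out to a full block. If that still
+ * can't hold the trimmed last entry plus the new dirent, we
+ * need a second block.
+ */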
+ new_rec_len = le16_to_cpu(last_de->rec_len) +
+ (dir->i_sb->s_blocksize - i_size_read(dir));
+ if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
+ *blocks_wanted = 2;
+
+ ret = -ENOSPC;
+out:
+ return ret;
+}
- if (!namelen) {
- status = -EINVAL;
- mlog_errno(status);
- goto bail;
- }
+static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
+ int namelen, struct buffer_head **ret_de_bh)
+{
+ unsigned long offset;
+ struct buffer_head *bh = NULL;
+ unsigned short rec_len;
+ struct ocfs2_dir_entry *de;
+ struct super_block *sb = dir->i_sb;
+ int status;
bh = ocfs2_bread(dir, 0, &status, 0);
if (!bh) {
@@ -568,17 +1652,11 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
bh = NULL;
if (i_size_read(dir) <= offset) {
- status = ocfs2_extend_dir(osb,
- dir,
- parent_fe_bh,
- &bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- BUG_ON(!bh);
- *ret_de_bh = bh;
- get_bh(*ret_de_bh);
+ /*
+ * Caller will have to expand this
+ * directory.
+ */
+ status = -ENOSPC;
goto bail;
}
bh = ocfs2_bread(dir,
@@ -600,10 +1678,7 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
status = -EEXIST;
goto bail;
}
- if (((le64_to_cpu(de->inode) == 0) &&
- (le16_to_cpu(de->rec_len) >= rec_len)) ||
- (le16_to_cpu(de->rec_len) >=
- (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
/* Ok, we found a spot. Return this bh and let
* the caller actually fill it in. */
*ret_de_bh = bh;
@@ -623,3 +1698,61 @@ bail:
mlog_exit(status);
return status;
}
+
+int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ const char *name,
+ int namelen,
+ struct buffer_head **ret_de_bh)
+{
+ int ret;
+ unsigned int blocks_wanted = 1;
+ struct buffer_head *bh = NULL;
+
+ mlog(0, "getting ready to insert namelen %d into dir %llu\n",
+ namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+
+ *ret_de_bh = NULL;
+
+ if (!namelen) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name,
+ namelen, &bh, &blocks_wanted);
+ } else
+ ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh);
+
+ if (ret && ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (ret == -ENOSPC) {
+ /*
+ * We have to expand the directory to add this name.
+ */
+ BUG_ON(bh);
+
+ ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted,
+ &bh);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(!bh);
+ }
+
+ *ret_de_bh = bh;
+ bh = NULL;
+out:
+ if (bh)
+ brelse(bh);
+ return ret;
+}
diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h
index 3f67e14..ce48b90 100644
--- a/fs/ocfs2/dir.h
+++ b/fs/ocfs2/dir.h
@@ -26,17 +26,49 @@
#ifndef OCFS2_DIR_H
#define OCFS2_DIR_H
+struct buffer_head *ocfs2_find_entry(const char *name,
+ int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir);
+int ocfs2_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh);
+int __ocfs2_add_entry(handle_t *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh);
+static inline int ocfs2_add_entry(handle_t *handle,
+ struct dentry *dentry,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
+ dentry->d_name.name, dentry->d_name.len,
+ inode, blkno, parent_fe_bh, insert_bh);
+}
+int ocfs2_update_entry(struct inode *dir, handle_t *handle,
+ struct buffer_head *de_bh, struct ocfs2_dir_entry *de,
+ struct inode *new_entry_inode);
+
int ocfs2_check_dir_for_entry(struct inode *dir,
const char *name,
int namelen);
-int ocfs2_empty_dir(struct inode *inode); /* FIXME: to namei.c */
+int ocfs2_empty_dir(struct inode *inode);
int ocfs2_find_files_on_disk(const char *name,
int namelen,
u64 *blkno,
struct inode *inode,
struct buffer_head **dirent_bh,
struct ocfs2_dir_entry **dirent);
+int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
+ int namelen, u64 *blkno);
int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir);
+int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
+ filldir_t filldir);
int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
struct inode *dir,
struct buffer_head *parent_fe_bh,
@@ -44,11 +76,11 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
int namelen,
struct buffer_head **ret_de_bh);
struct ocfs2_alloc_context;
-int ocfs2_do_extend_dir(struct super_block *sb,
- handle_t *handle,
- struct inode *dir,
- struct buffer_head *parent_fe_bh,
- struct ocfs2_alloc_context *data_ac,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **new_bh);
+int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac);
+
#endif /* OCFS2_DIR_H */
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index fd8cb1b..7418dc8 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -592,7 +592,7 @@ static int __init init_dlmfs_fs(void)
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- dlmfs_init_once, NULL);
+ dlmfs_init_once);
if (!dlmfs_inode_cache)
return -ENOMEM;
cleanup_inode = 1;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 65b2b9b..62e4a7d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -510,7 +510,7 @@ int dlm_init_mle_cache(void)
dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
sizeof(struct dlm_master_list_entry),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (dlm_mle_cache == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f71250e..41c76ff 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1482,6 +1482,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
+ lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
out:
@@ -1515,6 +1516,7 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
+ oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
ocfs2_set_inode_flags(inode);
/* fast-symlinks are a special case */
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 492bad3..87a785e 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -29,12 +29,12 @@
#include "dcache.h"
-#define OCFS2_LVB_VERSION 4
+#define OCFS2_LVB_VERSION 5
struct ocfs2_meta_lvb {
__u8 lvb_version;
__u8 lvb_reserved0;
- __be16 lvb_reserved1;
+ __be16 lvb_idynfeatures;
__be32 lvb_iclusters;
__be32 lvb_iuid;
__be32 lvb_igid;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index bc48177..c3bbc19 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -88,8 +88,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
struct dentry *parent;
struct inode *inode;
struct inode *dir = child->d_inode;
- struct buffer_head *dirent_bh = NULL;
- struct ocfs2_dir_entry *dirent;
mlog_entry("(0x%p, '%.*s')\n", child,
child->d_name.len, child->d_name.name);
@@ -105,8 +103,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
goto bail;
}
- status = ocfs2_find_files_on_disk("..", 2, &blkno, dir, &dirent_bh,
- &dirent);
+ status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
if (status < 0) {
parent = ERR_PTR(-ENOENT);
goto bail_unlock;
@@ -131,9 +128,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
bail_unlock:
ocfs2_meta_unlock(dir, 0);
- if (dirent_bh)
- brelse(dirent_bh);
-
bail:
mlog_exit_ptr(parent);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 03c1d365..c58668a 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -387,6 +387,12 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
struct ocfs2_extent_rec *rec;
u32 coff;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = -ERANGE;
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
num_clusters, extent_flags);
if (ret == 0)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 004c2abb..a62b14e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -34,6 +34,7 @@
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
+#include <linux/falloc.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -186,6 +187,7 @@ int ocfs2_update_inode_atime(struct inode *inode,
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
mlog_entry_void();
@@ -196,11 +198,27 @@ int ocfs2_update_inode_atime(struct inode *inode,
goto out;
}
+ ret = ocfs2_journal_access(handle, inode, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /*
+ * Don't use ocfs2_mark_inode_dirty() here as we don't always
+ * have i_mutex to guard against concurrent changes to other
+ * inode fields.
+ */
inode->i_atime = CURRENT_TIME;
- ret = ocfs2_mark_inode_dirty(handle, inode, bh);
+ di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
+ di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+
+ ret = ocfs2_journal_dirty(handle, bh);
if (ret < 0)
mlog_errno(ret);
+out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
mlog_exit(ret);
@@ -296,7 +314,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
}
i_size_write(inode, new_i_size);
- inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
di = (struct ocfs2_dinode *) fe_bh->b_data;
@@ -380,6 +397,15 @@ static int ocfs2_truncate_file(struct inode *inode,
unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
truncate_inode_pages(inode->i_mapping, new_i_size);
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
+ i_size_read(inode), 0);
+ if (status)
+ mlog_errno(status);
+
+ goto bail_unlock_data;
+ }
+
/* alright, we're going to need to do a full blown alloc size
* change. Orphan the inode so that recovery can complete the
* truncate if necessary. This does the task of marking
@@ -474,8 +500,8 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
goto leave;
}
- status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
- &bit_off, &num_bits);
+ status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
+ clusters_to_add, &bit_off, &num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -762,25 +788,6 @@ leave:
return status;
}
-static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
- u32 clusters_to_add, int mark_unwritten)
-{
- int ret;
-
- /*
- * The alloc sem blocks peope in read/write from reading our
- * allocation until we're done changing it. We depend on
- * i_mutex to block other extend/truncate calls while we're
- * here.
- */
- down_write(&OCFS2_I(inode)->ip_alloc_sem);
- ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
- mark_unwritten);
- up_write(&OCFS2_I(inode)->ip_alloc_sem);
-
- return ret;
-}
-
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->prepare_write() and
@@ -872,25 +879,48 @@ out:
return ret;
}
-/*
- * A tail_to_skip value > 0 indicates that we're being called from
- * ocfs2_file_aio_write(). This has the following implications:
- *
- * - we don't want to update i_size
- * - di_bh will be NULL, which is fine because it's only used in the
- * case where we want to update i_size.
- * - ocfs2_zero_extend() will then only be filling the hole created
- * between i_size and the start of the write.
- */
+int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+{
+ int ret;
+ u32 clusters_to_add;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
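+ /*
+ * Work out how many clusters beyond the current allocation are
+ * needed to reach new_i_size; zero means the space is already
+ * allocated.
+ */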
+ clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
+ if (clusters_to_add < oi->ip_clusters)
+ clusters_to_add = 0;
+ else
+ clusters_to_add -= oi->ip_clusters;
+
+ if (clusters_to_add) {
+ ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
+ clusters_to_add, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Call this even if we don't add any clusters to the tree. We
+ * still need to zero the area between the old i_size and the
+ * new i_size.
+ */
+ ret = ocfs2_zero_extend(inode, zero_to);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
- u64 new_i_size,
- size_t tail_to_skip)
+ u64 new_i_size)
{
- int ret = 0;
- u32 clusters_to_add = 0;
+ int ret = 0, data_locked = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
- BUG_ON(!tail_to_skip && !di_bh);
+ BUG_ON(!di_bh);
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
@@ -900,13 +930,18 @@ static int ocfs2_extend_file(struct inode *inode,
goto out;
BUG_ON(new_i_size < i_size_read(inode));
- if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
- BUG_ON(tail_to_skip != 0);
+ /*
+ * Fall through for converting inline data, even if the fs
+ * supports sparse files.
+ *
+ * The check for inline data here is legal - nobody can add
+ * the feature since we have i_mutex. We must check it again
+ * after acquiring ip_alloc_sem though, as paths like mmap
+ * might have raced us to converting the inode to extents.
+ */
+ if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
goto out_update_size;
- }
-
- clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
- OCFS2_I(inode)->ip_clusters;
/*
* protect the pages that ocfs2_zero_extend is going to be
@@ -920,39 +955,52 @@ static int ocfs2_extend_file(struct inode *inode,
mlog_errno(ret);
goto out;
}
+ data_locked = 1;
+
+ /*
+ * The alloc sem blocks people in read/write from reading our
+ * allocation until we're done changing it. We depend on
+ * i_mutex to block other extend/truncate calls while we're
+ * here.
+ */
+ down_write(&oi->ip_alloc_sem);
+
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ /*
+ * We can optimize small extends by keeping the inode's
+ * inline data.
+ */
+ if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
+ up_write(&oi->ip_alloc_sem);
+ goto out_update_size;
+ }
+
+ ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
+ if (ret) {
+ up_write(&oi->ip_alloc_sem);
- if (clusters_to_add) {
- ret = ocfs2_extend_allocation(inode,
- OCFS2_I(inode)->ip_clusters,
- clusters_to_add, 0);
- if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
}
- /*
- * Call this even if we don't add any clusters to the tree. We
- * still need to zero the area between the old i_size and the
- * new i_size.
- */
- ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+ if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+
+ up_write(&oi->ip_alloc_sem);
+
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
}
out_update_size:
- if (!tail_to_skip) {
- /* We're being called from ocfs2_setattr() which wants
- * us to update i_size */
- ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
- if (ret < 0)
- mlog_errno(ret);
- }
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
out_unlock:
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ if (data_locked)
ocfs2_data_unlock(inode, 1);
out:
@@ -1010,10 +1058,15 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
if (size_change && attr->ia_size != i_size_read(inode)) {
+ if (attr->ia_size > sb->s_maxbytes) {
+ status = -EFBIG;
+ goto bail_unlock;
+ }
+
if (i_size_read(inode) > attr->ia_size)
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
else
- status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
+ status = ocfs2_extend_file(inode, bh, attr->ia_size);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -1221,6 +1274,31 @@ static int ocfs2_allocate_unwritten_extents(struct inode *inode,
{
int ret;
u32 cpos, phys_cpos, clusters, alloc_size;
+ u64 end = start + len;
+ struct buffer_head *di_bh = NULL;
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno, &di_bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Nothing to do if the requested reservation range
+ * fits within the inode.
+ */
+ if (ocfs2_size_fits_inline_data(di_bh, end))
+ goto out;
+
+ ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
/*
* We consider both start and len to be inclusive.
@@ -1266,6 +1344,8 @@ next:
ret = 0;
out:
+
+ brelse(di_bh);
return ret;
}
@@ -1447,6 +1527,14 @@ static int ocfs2_remove_inode_range(struct inode *inode,
if (byte_len == 0)
return 0;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
+ byte_start + byte_len, 1);
+ if (ret)
+ mlog_errno(ret);
+ return ret;
+ }
+
trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
if (trunc_len >= trunc_start)
@@ -1504,29 +1592,18 @@ out:
/*
* Parts of this function taken from xfs_change_file_space()
*/
-int ocfs2_change_file_space(struct file *file, unsigned int cmd,
- struct ocfs2_space_resv *sr)
+static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ loff_t f_pos, unsigned int cmd,
+ struct ocfs2_space_resv *sr,
+ int change_size)
{
int ret;
s64 llen;
- struct inode *inode = file->f_path.dentry->d_inode;
+ loff_t size;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *di_bh = NULL;
handle_t *handle;
- unsigned long long max_off = ocfs2_max_file_offset(inode->i_sb->s_blocksize_bits);
-
- if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
- !ocfs2_writes_unwritten_extents(osb))
- return -ENOTTY;
- else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
- !ocfs2_sparse_alloc(osb))
- return -ENOTTY;
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
- if (!(file->f_mode & FMODE_WRITE))
- return -EBADF;
+ unsigned long long max_off = inode->i_sb->s_maxbytes;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
@@ -1557,7 +1634,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
case 0: /*SEEK_SET*/
break;
case 1: /*SEEK_CUR*/
- sr->l_start += file->f_pos;
+ sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
sr->l_start += i_size_read(inode);
@@ -1577,6 +1654,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
ret = -EINVAL;
goto out_meta_unlock;
}
+ size = sr->l_start + sr->l_len;
if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
if (sr->l_len <= 0) {
@@ -1585,7 +1663,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
}
}
- if (should_remove_suid(file->f_path.dentry)) {
+ if (file && should_remove_suid(file->f_path.dentry)) {
ret = __ocfs2_write_remove_suid(inode, di_bh);
if (ret) {
mlog_errno(ret);
@@ -1628,6 +1706,9 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
goto out_meta_unlock;
}
+ if (change_size && i_size_read(inode) < size)
+ i_size_write(inode, size);
+
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
@@ -1646,21 +1727,65 @@ out:
return ret;
}
+int ocfs2_change_file_space(struct file *file, unsigned int cmd,
+ struct ocfs2_space_resv *sr)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
+ !ocfs2_writes_unwritten_extents(osb))
+ return -ENOTTY;
+ else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
+ !ocfs2_sparse_alloc(osb))
+ return -ENOTTY;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
+}
+
+static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+ loff_t len)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_space_resv sr;
+ int change_size = 1;
+
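+ /*
+ * fallocate() is built on the RESVSP64 space reservation path.
+ * 'change_size' asks __ocfs2_change_file_space() to also bump
+ * i_size over the reserved range; it is cleared for
+ * FALLOC_FL_KEEP_SIZE.
+ */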
+ if (!ocfs2_writes_unwritten_extents(osb))
+ return -EOPNOTSUPP;
+
+ if (S_ISDIR(inode->i_mode))
+ return -ENODEV;
+
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ change_size = 0;
+
+ sr.l_whence = 0;
+ sr.l_start = (s64)offset;
+ sr.l_len = (s64)len;
+
+ return __ocfs2_change_file_space(NULL, inode, offset,
+ OCFS2_IOC_RESVSP64, &sr, change_size);
+}
+
static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
loff_t *ppos,
size_t count,
int appending,
int *direct_io)
{
- int ret = 0, meta_level = appending;
+ int ret = 0, meta_level = 0;
struct inode *inode = dentry->d_inode;
- u32 clusters;
- loff_t newsize, saved_pos;
+ loff_t saved_pos, end;
/*
- * We sample i_size under a read level meta lock to see if our write
- * is extending the file, if it is we back off and get a write level
- * meta lock.
+ * We start with a read level meta lock and only jump to an ex
+ * if we need to make modifications here.
*/
for(;;) {
ret = ocfs2_meta_lock(inode, NULL, meta_level);
@@ -1702,87 +1827,47 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
saved_pos = *ppos;
}
- if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
- loff_t end = saved_pos + count;
-
- /*
- * Skip the O_DIRECT checks if we don't need
- * them.
- */
- if (!direct_io || !(*direct_io))
- break;
-
- /*
- * Allowing concurrent direct writes means
- * i_size changes wouldn't be synchronized, so
- * one node could wind up truncating another
- * nodes writes.
- */
- if (end > i_size_read(inode)) {
- *direct_io = 0;
- break;
- }
+ end = saved_pos + count;
- /*
- * We don't fill holes during direct io, so
- * check for them here. If any are found, the
- * caller will have to retake some cluster
- * locks and initiate the io as buffered.
- */
- ret = ocfs2_check_range_for_holes(inode, saved_pos,
- count);
- if (ret == 1) {
- *direct_io = 0;
- ret = 0;
- } else if (ret < 0)
- mlog_errno(ret);
+ /*
+ * Skip the O_DIRECT checks if we don't need
+ * them.
+ */
+ if (!direct_io || !(*direct_io))
break;
- }
/*
- * The rest of this loop is concerned with legacy file
- * systems which don't support sparse files.
+ * There's no sane way to do direct writes to an inode
+ * with inline data.
*/
-
- newsize = count + saved_pos;
-
- mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
- (long long) saved_pos, (long long) newsize,
- (long long) i_size_read(inode));
-
- /* No need for a higher level metadata lock if we're
- * never going past i_size. */
- if (newsize <= i_size_read(inode))
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ *direct_io = 0;
break;
-
- if (meta_level == 0) {
- ocfs2_meta_unlock(inode, meta_level);
- meta_level = 1;
- continue;
}
- spin_lock(&OCFS2_I(inode)->ip_lock);
- clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
- OCFS2_I(inode)->ip_clusters;
- spin_unlock(&OCFS2_I(inode)->ip_lock);
-
- mlog(0, "Writing at EOF, may need more allocation: "
- "i_size = %lld, newsize = %lld, need %u clusters\n",
- (long long) i_size_read(inode), (long long) newsize,
- clusters);
-
- /* We only want to continue the rest of this loop if
- * our extend will actually require more
- * allocation. */
- if (!clusters)
+ /*
+ * Allowing concurrent direct writes means
+ * i_size changes wouldn't be synchronized, so
+ * one node could wind up truncating another
+ * node's writes.
+ */
+ if (end > i_size_read(inode)) {
+ *direct_io = 0;
break;
-
- ret = ocfs2_extend_file(inode, NULL, newsize, count);
- if (ret < 0) {
- if (ret != -ENOSPC)
- mlog_errno(ret);
- goto out_unlock;
}
+
+ /*
+ * We don't fill holes during direct io, so
+ * check for them here. If any are found, the
+ * caller will have to retake some cluster
+ * locks and initiate the io as buffered.
+ */
+ ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
+ if (ret == 1) {
+ *direct_io = 0;
+ ret = 0;
+ } else if (ret < 0)
+ mlog_errno(ret);
break;
}
@@ -1902,7 +1987,7 @@ static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
}
dst = kmap_atomic(page, KM_USER0);
- memcpy(dst + (pos & (PAGE_CACHE_SIZE - 1)), buf, bytes);
+ memcpy(dst + (pos & (loff_t)(PAGE_CACHE_SIZE - 1)), buf, bytes);
kunmap_atomic(dst, KM_USER0);
flush_dcache_page(page);
ocfs2_put_write_source(user_page);
@@ -2113,7 +2198,7 @@ static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
src = buf->ops->map(pipe, buf, 1);
dst = kmap_atomic(page, KM_USER1);
memcpy(dst + offset, src + buf->offset, count);
- kunmap_atomic(page, KM_USER1);
+ kunmap_atomic(dst, KM_USER1);
buf->ops->unmap(pipe, buf, src);
copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
@@ -2312,6 +2397,7 @@ const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
.permission = ocfs2_permission,
+ .fallocate = ocfs2_fallocate,
};
const struct inode_operations ocfs2_special_file_iops = {
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 36fe27f..066f14a 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -47,6 +47,8 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret);
+int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
+ u64 zero_to);
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
u32 clusters_to_add, u32 extents_to_split,
struct ocfs2_alloc_context **data_ac,
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 352eb4a..c4c3617 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -209,7 +209,7 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
- ret = call_usermodehelper(argv[0], argv, envp, 1);
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (ret < 0)
mlog_errno(ret);
}
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index c53a676..1d5e0cb 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -241,6 +241,7 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+ OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
inode->i_version = 1;
inode->i_generation = le32_to_cpu(fe->i_generation);
@@ -513,6 +514,10 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ /*
+ * This check will also skip truncate of inodes with inline
+ * data and fast symlinks.
+ */
if (fe->i_clusters) {
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
@@ -1220,6 +1225,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
ocfs2_get_inode_flags(OCFS2_I(inode));
fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
+ fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
spin_unlock(&OCFS2_I(inode)->ip_lock);
fe->i_size = cpu_to_le64(i_size_read(inode));
@@ -1257,6 +1263,7 @@ void ocfs2_refresh_inode(struct inode *inode,
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+ OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
inode->i_nlink = le16_to_cpu(fe->i_links_count);
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index a41d081..70e881c 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -51,6 +51,7 @@ struct ocfs2_inode_info
u32 ip_flags; /* see below */
u32 ip_attr; /* inode attributes */
+ u16 ip_dyn_features;
/* protected by recovery_lock. */
struct inode *ip_next_orphan;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index dbfb20b..f9d01e2 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -35,13 +35,13 @@
#include "ocfs2.h"
#include "alloc.h"
+#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
-#include "namei.h"
#include "slot_map.h"
#include "super.h"
#include "vote.h"
@@ -1213,17 +1213,49 @@ bail:
return status;
}
+struct ocfs2_orphan_filldir_priv {
+ struct inode *head;
+ struct ocfs2_super *osb;
+};
+
+static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
+ loff_t pos, u64 ino, unsigned type)
+{
+ struct ocfs2_orphan_filldir_priv *p = priv;
+ struct inode *iter;
+
+ if (name_len == 1 && !strncmp(".", name, 1))
+ return 0;
+ if (name_len == 2 && !strncmp("..", name, 2))
+ return 0;
+
+ /* Skip bad inodes so that recovery can continue */
+ iter = ocfs2_iget(p->osb, ino,
+ OCFS2_FI_FLAG_ORPHAN_RECOVERY);
+ if (IS_ERR(iter))
+ return 0;
+
+ mlog(0, "queue orphan %llu\n",
+ (unsigned long long)OCFS2_I(iter)->ip_blkno);
+ /* No locking is required for the next_orphan queue as there
+ * is only ever a single process doing orphan recovery. */
+ OCFS2_I(iter)->ip_next_orphan = p->head;
+ p->head = iter;
+
+ return 0;
+}
+
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
int slot,
struct inode **head)
{
int status;
struct inode *orphan_dir_inode = NULL;
- struct inode *iter;
- unsigned long offset, blk, local;
- struct buffer_head *bh = NULL;
- struct ocfs2_dir_entry *de;
- struct super_block *sb = osb->sb;
+ struct ocfs2_orphan_filldir_priv priv;
+ loff_t pos = 0;
+
+ priv.osb = osb;
+ priv.head = *head;
orphan_dir_inode = ocfs2_get_system_file_inode(osb,
ORPHAN_DIR_SYSTEM_INODE,
@@ -1241,77 +1273,15 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
goto out;
}
- offset = 0;
- iter = NULL;
- while(offset < i_size_read(orphan_dir_inode)) {
- blk = offset >> sb->s_blocksize_bits;
-
- bh = ocfs2_bread(orphan_dir_inode, blk, &status, 0);
- if (!bh)
- status = -EINVAL;
- if (status < 0) {
- if (bh)
- brelse(bh);
- mlog_errno(status);
- goto out_unlock;
- }
-
- local = 0;
- while(offset < i_size_read(orphan_dir_inode)
- && local < sb->s_blocksize) {
- de = (struct ocfs2_dir_entry *) (bh->b_data + local);
-
- if (!ocfs2_check_dir_entry(orphan_dir_inode,
- de, bh, local)) {
- status = -EINVAL;
- mlog_errno(status);
- brelse(bh);
- goto out_unlock;
- }
-
- local += le16_to_cpu(de->rec_len);
- offset += le16_to_cpu(de->rec_len);
-
- /* I guess we silently fail on no inode? */
- if (!le64_to_cpu(de->inode))
- continue;
- if (de->file_type > OCFS2_FT_MAX) {
- mlog(ML_ERROR,
- "block %llu contains invalid de: "
- "inode = %llu, rec_len = %u, "
- "name_len = %u, file_type = %u, "
- "name='%.*s'\n",
- (unsigned long long)bh->b_blocknr,
- (unsigned long long)le64_to_cpu(de->inode),
- le16_to_cpu(de->rec_len),
- de->name_len,
- de->file_type,
- de->name_len,
- de->name);
- continue;
- }
- if (de->name_len == 1 && !strncmp(".", de->name, 1))
- continue;
- if (de->name_len == 2 && !strncmp("..", de->name, 2))
- continue;
-
- iter = ocfs2_iget(osb, le64_to_cpu(de->inode),
- OCFS2_FI_FLAG_ORPHAN_RECOVERY);
- if (IS_ERR(iter))
- continue;
-
- mlog(0, "queue orphan %llu\n",
- (unsigned long long)OCFS2_I(iter)->ip_blkno);
- /* No locking is required for the next_orphan
- * queue as there is only ever a single
- * process doing orphan recovery. */
- OCFS2_I(iter)->ip_next_orphan = *head;
- *head = iter;
- }
- brelse(bh);
+ status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
+ ocfs2_orphan_filldir);
+ if (status) {
+ mlog_errno(status);
+ goto out;
}
-out_unlock:
+ *head = priv.head;
+
ocfs2_meta_unlock(orphan_dir_inode, 0);
out:
mutex_unlock(&orphan_dir_inode->i_mutex);
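
For reference, a minimal sketch of the filldir-style callback shape that ocfs2_dir_foreach() consumes above; the counting payload and names here are illustrative, not part of the patch:

#include <linux/fs.h>
#include <linux/string.h>

struct count_priv {
	unsigned long seen;	/* entries other than "." and ".." */
};

/* Same signature as ocfs2_orphan_filldir() above. */
static int example_count_filldir(void *priv, const char *name, int name_len,
				 loff_t pos, u64 ino, unsigned type)
{
	struct count_priv *p = priv;

	if (name_len == 1 && !strncmp(".", name, 1))
		return 0;
	if (name_len == 2 && !strncmp("..", name, 2))
		return 0;

	p->seen++;
	return 0;	/* a non-zero return would stop the directory walk */
}
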
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index ce60aab..4b32e09 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -282,6 +282,9 @@ int ocfs2_journal_dirty_data(handle_t *handle,
* prev. group desc. if we relink. */
#define OCFS2_SUBALLOC_ALLOC (3)
+#define OCFS2_INLINE_TO_EXTENTS_CREDITS (OCFS2_SUBALLOC_ALLOC \
+ + OCFS2_INODE_UPDATE_CREDITS)
+
/* dinode + group descriptor update. We don't relink on free yet. */
#define OCFS2_SUBALLOC_FREE (2)
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 545f789..d272847 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -514,8 +514,10 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
ac->ac_bh = osb->local_alloc_bh;
status = 0;
bail:
- if (status < 0 && local_alloc_inode)
+ if (status < 0 && local_alloc_inode) {
+ mutex_unlock(&local_alloc_inode->i_mutex);
iput(local_alloc_inode);
+ }
mlog_exit(status);
return status;
@@ -524,13 +526,12 @@ bail:
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
- u32 min_bits,
+ u32 bits_wanted,
u32 *bit_off,
u32 *num_bits)
{
int status, start;
struct inode *local_alloc_inode;
- u32 bits_wanted;
void *bitmap;
struct ocfs2_dinode *alloc;
struct ocfs2_local_alloc *la;
@@ -538,7 +539,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
mlog_entry_void();
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
- bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
local_alloc_inode = ac->ac_inode;
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 385a101..3f76631 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -48,7 +48,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
- u32 min_bits,
+ u32 bits_wanted,
u32 *bit_off,
u32 *num_bits);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index d79aa12..9875615 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -60,31 +60,28 @@ static inline int ocfs2_vm_op_unblock_sigs(sigset_t *oldset)
return sigprocmask(SIG_SETMASK, oldset, NULL);
}
-static struct page *ocfs2_nopage(struct vm_area_struct * area,
- unsigned long address,
- int *type)
+static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
- struct page *page = NOPAGE_SIGBUS;
sigset_t blocked, oldset;
- int ret;
+ int error, ret;
- mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address,
- type);
+ mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
- ret = ocfs2_vm_op_block_sigs(&blocked, &oldset);
- if (ret < 0) {
- mlog_errno(ret);
+ error = ocfs2_vm_op_block_sigs(&blocked, &oldset);
+ if (error < 0) {
+ mlog_errno(error);
+ ret = VM_FAULT_SIGBUS;
goto out;
}
- page = filemap_nopage(area, address, type);
+ ret = filemap_fault(area, vmf);
- ret = ocfs2_vm_op_unblock_sigs(&oldset);
- if (ret < 0)
- mlog_errno(ret);
+ error = ocfs2_vm_op_unblock_sigs(&oldset);
+ if (error < 0)
+ mlog_errno(error);
out:
- mlog_exit_ptr(page);
- return page;
+ mlog_exit_ptr(vmf->page);
+ return ret;
}
static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
@@ -92,7 +89,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
{
int ret;
struct address_space *mapping = inode->i_mapping;
- loff_t pos = page->index << PAGE_CACHE_SHIFT;
+ loff_t pos = page_offset(page);
unsigned int len = PAGE_CACHE_SIZE;
pgoff_t last_index;
struct page *locked_page = NULL;
@@ -209,7 +206,7 @@ out:
}
static struct vm_operations_struct ocfs2_file_vm_ops = {
- .nopage = ocfs2_nopage,
+ .fault = ocfs2_fault,
.page_mkwrite = ocfs2_page_mkwrite,
};
@@ -226,6 +223,7 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level);
out:
vma->vm_ops = &ocfs2_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
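
The conversion above follows the generic ->fault pattern; a minimal sketch of that pattern, with the per-filesystem setup reduced to comments (assumed, not taken from the patch):

#include <linux/mm.h>

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;

	/* per-fs setup would go here (signal blocking, cluster locks, ...) */

	ret = filemap_fault(vma, vmf);	/* returns VM_FAULT_* bits */

	/* per-fs teardown would go here */

	return ret;
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
	/* .page_mkwrite may be set as well, as ocfs2 does above */
};
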
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d430fda..7292590 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -64,29 +64,6 @@
#include "buffer_head_io.h"
-#define NAMEI_RA_CHUNKS 2
-#define NAMEI_RA_BLOCKS 4
-#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
-
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
- struct inode *dir,
- const char *name, int namelen,
- unsigned long offset,
- struct ocfs2_dir_entry **res_dir);
-
-static int ocfs2_delete_entry(handle_t *handle,
- struct inode *dir,
- struct ocfs2_dir_entry *de_del,
- struct buffer_head *bh);
-
-static int __ocfs2_add_entry(handle_t *handle,
- struct inode *dir,
- const char *name, int namelen,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh);
-
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct dentry *dentry, int mode,
@@ -97,13 +74,6 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode **ret_inode,
struct ocfs2_alloc_context *inode_ac);
-static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *parent,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_alloc_context *data_ac);
-
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
struct inode *inode,
@@ -123,17 +93,6 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
struct inode *inode,
const char *symname);
-static inline int ocfs2_add_entry(handle_t *handle,
- struct dentry *dentry,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh)
-{
- return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
- dentry->d_name.name, dentry->d_name.len,
- inode, blkno, parent_fe_bh, insert_bh);
-}
-
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
@@ -142,10 +101,8 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
{
int status;
u64 blkno;
- struct buffer_head *dirent_bh = NULL;
struct inode *inode = NULL;
struct dentry *ret;
- struct ocfs2_dir_entry *dirent;
struct ocfs2_inode_info *oi;
mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
@@ -167,9 +124,8 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
goto bail;
}
- status = ocfs2_find_files_on_disk(dentry->d_name.name,
- dentry->d_name.len, &blkno,
- dir, &dirent_bh, &dirent);
+ status = ocfs2_lookup_ino_from_name(dir, dentry->d_name.name,
+ dentry->d_name.len, &blkno);
if (status < 0)
goto bail_add;
@@ -224,83 +180,12 @@ bail_unlock:
ocfs2_meta_unlock(dir, 0);
bail:
- if (dirent_bh)
- brelse(dirent_bh);
mlog_exit_ptr(ret);
return ret;
}
-static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *parent,
- struct inode *inode,
- struct buffer_head *fe_bh,
- struct ocfs2_alloc_context *data_ac)
-{
- int status;
- struct buffer_head *new_bh = NULL;
- struct ocfs2_dir_entry *de = NULL;
-
- mlog_entry_void();
-
- status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
- data_ac, NULL, &new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- ocfs2_set_new_buffer_uptodate(inode, new_bh);
-
- status = ocfs2_journal_access(handle, inode, new_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
- memset(new_bh->b_data, 0, osb->sb->s_blocksize);
-
- de = (struct ocfs2_dir_entry *) new_bh->b_data;
- de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
- de->name_len = 1;
- de->rec_len =
- cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- strcpy(de->name, ".");
- ocfs2_set_de_type(de, S_IFDIR);
- de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
- de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
- de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize -
- OCFS2_DIR_REC_LEN(1));
- de->name_len = 2;
- strcpy(de->name, "..");
- ocfs2_set_de_type(de, S_IFDIR);
-
- status = ocfs2_journal_dirty(handle, new_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- i_size_write(inode, inode->i_sb->s_blocksize);
- inode->i_nlink = 2;
- inode->i_blocks = ocfs2_inode_sector_count(inode);
- status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
-
- status = 0;
-bail:
- if (new_bh)
- brelse(new_bh);
-
- mlog_exit(status);
- return status;
-}
-
static int ocfs2_mknod(struct inode *dir,
struct dentry *dentry,
int mode,
@@ -365,9 +250,8 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
- /* are we making a directory? If so, reserve a cluster for his
- * 1st extent. */
- if (S_ISDIR(mode)) {
+ /* Reserve a cluster if creating an extent based directory. */
+ if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) {
status = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (status < 0) {
if (status != -ENOSPC)
@@ -564,10 +448,21 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
cpu_to_le32(CURRENT_TIME.tv_nsec);
fe->i_dtime = 0;
- fel = &fe->id2.i_list;
- fel->l_tree_depth = 0;
- fel->l_next_free_rec = 0;
- fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
+ /*
+ * If supported, directories start with inline data.
+ */
+ if (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) {
+ u16 feat = le16_to_cpu(fe->i_dyn_features);
+
+ fe->i_dyn_features = cpu_to_le16(feat | OCFS2_INLINE_DATA_FL);
+
+ fe->id2.i_data.id_count = cpu_to_le16(ocfs2_max_inline_data(osb->sb));
+ } else {
+ fel = &fe->id2.i_list;
+ fel->l_tree_depth = 0;
+ fel->l_next_free_rec = 0;
+ fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
+ }
status = ocfs2_journal_dirty(handle, *new_fe_bh);
if (status < 0) {
@@ -1048,11 +943,6 @@ static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
ocfs2_meta_unlock(inode2, 1);
}
-#define PARENT_INO(buffer) \
- ((struct ocfs2_dir_entry *) \
- ((char *)buffer + \
- le16_to_cpu(((struct ocfs2_dir_entry *)buffer)->rec_len)))->inode
-
static int ocfs2_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
@@ -1070,16 +960,17 @@ static int ocfs2_rename(struct inode *old_dir,
struct buffer_head *old_inode_bh = NULL;
struct buffer_head *insert_entry_bh = NULL;
struct ocfs2_super *osb = NULL;
- u64 newfe_blkno;
+ u64 newfe_blkno, old_de_ino;
handle_t *handle = NULL;
struct buffer_head *old_dir_bh = NULL;
struct buffer_head *new_dir_bh = NULL;
- struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry
- // and new_dentry
+ struct ocfs2_dir_entry *old_inode_dot_dot_de = NULL, *old_de = NULL,
+ *new_de = NULL;
struct buffer_head *new_de_bh = NULL, *old_de_bh = NULL; // bhs for above
struct buffer_head *old_inode_de_bh = NULL; // if old_dentry is a dir,
// this is the 1st dirent bh
nlink_t old_dir_nlink = old_dir->i_nlink;
+ struct ocfs2_dinode *old_di;
/* At some point it might be nice to break this function up a
* bit. */
@@ -1158,27 +1049,35 @@ static int ocfs2_rename(struct inode *old_dir,
}
if (S_ISDIR(old_inode->i_mode)) {
- status = -EIO;
- old_inode_de_bh = ocfs2_bread(old_inode, 0, &status, 0);
- if (!old_inode_de_bh)
+ u64 old_inode_parent;
+
+ status = ocfs2_find_files_on_disk("..", 2, &old_inode_parent,
+ old_inode, &old_inode_de_bh,
+ &old_inode_dot_dot_de);
+ if (status) {
+ status = -EIO;
goto bail;
+ }
- status = -EIO;
- if (le64_to_cpu(PARENT_INO(old_inode_de_bh->b_data)) !=
- OCFS2_I(old_dir)->ip_blkno)
+ if (old_inode_parent != OCFS2_I(old_dir)->ip_blkno) {
+ status = -EIO;
goto bail;
- status = -EMLINK;
- if (!new_inode && new_dir!=old_dir &&
- new_dir->i_nlink >= OCFS2_LINK_MAX)
+ }
+
+ if (!new_inode && new_dir != old_dir &&
+ new_dir->i_nlink >= OCFS2_LINK_MAX) {
+ status = -EMLINK;
goto bail;
+ }
}
- status = -ENOENT;
- old_de_bh = ocfs2_find_entry(old_dentry->d_name.name,
- old_dentry->d_name.len,
- old_dir, &old_de);
- if (!old_de_bh)
+ status = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ &old_de_ino);
+ if (status) {
+ status = -ENOENT;
goto bail;
+ }
/*
* Check for inode number is _not_ due to possible IO errors.
@@ -1186,8 +1085,10 @@ static int ocfs2_rename(struct inode *old_dir,
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
- if (le64_to_cpu(old_de->inode) != OCFS2_I(old_inode)->ip_blkno)
+ if (old_de_ino != OCFS2_I(old_inode)->ip_blkno) {
+ status = -ENOENT;
goto bail;
+ }
/* check if the target already exists (in which case we need
* to delete it */
@@ -1320,20 +1221,13 @@ static int ocfs2_rename(struct inode *old_dir,
}
/* change the dirent to point to the correct inode */
- status = ocfs2_journal_access(handle, new_dir, new_de_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_update_entry(new_dir, handle, new_de_bh,
+ new_de, old_inode);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- new_de->inode = cpu_to_le64(OCFS2_I(old_inode)->ip_blkno);
- new_de->file_type = old_de->file_type;
new_dir->i_version++;
- status = ocfs2_journal_dirty(handle, new_de_bh);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
if (S_ISDIR(new_inode->i_mode))
newfe->i_links_count = 0;
@@ -1354,9 +1248,36 @@ static int ocfs2_rename(struct inode *old_dir,
old_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(old_inode);
- ocfs2_mark_inode_dirty(handle, old_inode, old_inode_bh);
- /* now that the name has been added to new_dir, remove the old name */
+ status = ocfs2_journal_access(handle, old_inode, old_inode_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status >= 0) {
+ old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
+
+ old_di->i_ctime = cpu_to_le64(old_inode->i_ctime.tv_sec);
+ old_di->i_ctime_nsec = cpu_to_le32(old_inode->i_ctime.tv_nsec);
+
+ status = ocfs2_journal_dirty(handle, old_inode_bh);
+ if (status < 0)
+ mlog_errno(status);
+ } else
+ mlog_errno(status);
+
+ /*
+ * Now that the name has been added to new_dir, remove the old name.
+ *
+ * We don't keep any directory entry context around until now
+ * because the insert might have changed the type of directory
+ * we're dealing with.
+ */
+ old_de_bh = ocfs2_find_entry(old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ old_dir, &old_de);
+ if (!old_de_bh) {
+ status = -EIO;
+ goto bail;
+ }
+
status = ocfs2_delete_entry(handle, old_dir, old_de, old_de_bh);
if (status < 0) {
mlog_errno(status);
@@ -1369,12 +1290,8 @@ static int ocfs2_rename(struct inode *old_dir,
}
old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
if (old_inode_de_bh) {
- status = ocfs2_journal_access(handle, old_inode,
- old_inode_de_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- PARENT_INO(old_inode_de_bh->b_data) =
- cpu_to_le64(OCFS2_I(new_dir)->ip_blkno);
- status = ocfs2_journal_dirty(handle, old_inode_de_bh);
+ status = ocfs2_update_entry(old_inode, handle, old_inode_de_bh,
+ old_inode_dot_dot_de, new_dir);
old_dir->i_nlink--;
if (new_inode) {
new_inode->i_nlink--;
@@ -1753,329 +1670,6 @@ bail:
return status;
}
-int ocfs2_check_dir_entry(struct inode * dir,
- struct ocfs2_dir_entry * de,
- struct buffer_head * bh,
- unsigned long offset)
-{
- const char *error_msg = NULL;
- const int rlen = le16_to_cpu(de->rec_len);
-
- if (rlen < OCFS2_DIR_REC_LEN(1))
- error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
- error_msg = "rec_len % 4 != 0";
- else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
- error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
- error_msg = "directory entry across blocks";
-
- if (error_msg != NULL)
- mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
- "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
- offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
- de->name_len);
- return error_msg == NULL ? 1 : 0;
-}
-
-/* we don't always have a dentry for what we want to add, so people
- * like orphan dir can call this instead.
- *
- * If you pass me insert_bh, I'll skip the search of the other dir
- * blocks and put the record in there.
- */
-static int __ocfs2_add_entry(handle_t *handle,
- struct inode *dir,
- const char *name, int namelen,
- struct inode *inode, u64 blkno,
- struct buffer_head *parent_fe_bh,
- struct buffer_head *insert_bh)
-{
- unsigned long offset;
- unsigned short rec_len;
- struct ocfs2_dir_entry *de, *de1;
- struct super_block *sb;
- int retval, status;
-
- mlog_entry_void();
-
- sb = dir->i_sb;
-
- if (!namelen)
- return -EINVAL;
-
- rec_len = OCFS2_DIR_REC_LEN(namelen);
- offset = 0;
- de = (struct ocfs2_dir_entry *) insert_bh->b_data;
- while (1) {
- BUG_ON((char *)de >= sb->s_blocksize + insert_bh->b_data);
- /* These checks should've already been passed by the
- * prepare function, but I guess we can leave them
- * here anyway. */
- if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
- retval = -ENOENT;
- goto bail;
- }
- if (ocfs2_match(namelen, name, de)) {
- retval = -EEXIST;
- goto bail;
- }
- if (((le64_to_cpu(de->inode) == 0) &&
- (le16_to_cpu(de->rec_len) >= rec_len)) ||
- (le16_to_cpu(de->rec_len) >=
- (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
- if (retval < 0) {
- mlog_errno(retval);
- goto bail;
- }
-
- status = ocfs2_journal_access(handle, dir, insert_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- /* By now the buffer is marked for journaling */
- offset += le16_to_cpu(de->rec_len);
- if (le64_to_cpu(de->inode)) {
- de1 = (struct ocfs2_dir_entry *)((char *) de +
- OCFS2_DIR_REC_LEN(de->name_len));
- de1->rec_len =
- cpu_to_le16(le16_to_cpu(de->rec_len) -
- OCFS2_DIR_REC_LEN(de->name_len));
- de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- de = de1;
- }
- de->file_type = OCFS2_FT_UNKNOWN;
- if (blkno) {
- de->inode = cpu_to_le64(blkno);
- ocfs2_set_de_type(de, inode->i_mode);
- } else
- de->inode = 0;
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
-
- dir->i_version++;
- status = ocfs2_journal_dirty(handle, insert_bh);
- retval = 0;
- goto bail;
- }
- offset += le16_to_cpu(de->rec_len);
- de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
- }
-
- /* when you think about it, the assert above should prevent us
- * from ever getting here. */
- retval = -ENOSPC;
-bail:
-
- mlog_exit(retval);
- return retval;
-}
-
-
-/*
- * ocfs2_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ocfs2_delete_entry(handle_t *handle,
- struct inode *dir,
- struct ocfs2_dir_entry *de_del,
- struct buffer_head *bh)
-{
- struct ocfs2_dir_entry *de, *pde;
- int i, status = -ENOENT;
-
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
-
- i = 0;
- pde = NULL;
- de = (struct ocfs2_dir_entry *) bh->b_data;
- while (i < bh->b_size) {
- if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
- status = -EIO;
- mlog_errno(status);
- goto bail;
- }
- if (de == de_del) {
- status = ocfs2_journal_access(handle, dir, bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
- status = -EIO;
- mlog_errno(status);
- goto bail;
- }
- if (pde)
- pde->rec_len =
- cpu_to_le16(le16_to_cpu(pde->rec_len) +
- le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
- dir->i_version++;
- status = ocfs2_journal_dirty(handle, bh);
- goto bail;
- }
- i += le16_to_cpu(de->rec_len);
- pde = de;
- de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
- }
-bail:
- mlog_exit(status);
- return status;
-}
-
-/*
- * Returns 0 if not found, -1 on failure, and 1 on success
- */
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
- struct inode *dir,
- const char *name, int namelen,
- unsigned long offset,
- struct ocfs2_dir_entry **res_dir)
-{
- struct ocfs2_dir_entry *de;
- char *dlimit, *de_buf;
- int de_len;
- int ret = 0;
-
- mlog_entry_void();
-
- de_buf = bh->b_data;
- dlimit = de_buf + dir->i_sb->s_blocksize;
-
- while (de_buf < dlimit) {
- /* this code is executed quadratically often */
- /* do minimal checking `by hand' */
-
- de = (struct ocfs2_dir_entry *) de_buf;
-
- if (de_buf + namelen <= dlimit &&
- ocfs2_match(namelen, name, de)) {
- /* found a match - just to be sure, do a full check */
- if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
- ret = -1;
- goto bail;
- }
- *res_dir = de;
- ret = 1;
- goto bail;
- }
-
- /* prevent looping on a bad block */
- de_len = le16_to_cpu(de->rec_len);
- if (de_len <= 0) {
- ret = -1;
- goto bail;
- }
-
- de_buf += de_len;
- offset += de_len;
- }
-
-bail:
- mlog_exit(ret);
- return ret;
-}
-
-struct buffer_head *ocfs2_find_entry(const char *name, int namelen,
- struct inode *dir,
- struct ocfs2_dir_entry **res_dir)
-{
- struct super_block *sb;
- struct buffer_head *bh_use[NAMEI_RA_SIZE];
- struct buffer_head *bh, *ret = NULL;
- unsigned long start, block, b;
- int ra_max = 0; /* Number of bh's in the readahead
- buffer, bh_use[] */
- int ra_ptr = 0; /* Current index into readahead
- buffer */
- int num = 0;
- int nblocks, i, err;
-
- mlog_entry_void();
-
- *res_dir = NULL;
- sb = dir->i_sb;
-
- nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
- start = OCFS2_I(dir)->ip_dir_start_lookup;
- if (start >= nblocks)
- start = 0;
- block = start;
-
-restart:
- do {
- /*
- * We deal with the read-ahead logic here.
- */
- if (ra_ptr >= ra_max) {
- /* Refill the readahead buffer */
- ra_ptr = 0;
- b = block;
- for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
- /*
- * Terminate if we reach the end of the
- * directory and must wrap, or if our
- * search has finished at this block.
- */
- if (b >= nblocks || (num && block == start)) {
- bh_use[ra_max] = NULL;
- break;
- }
- num++;
-
- bh = ocfs2_bread(dir, b++, &err, 1);
- bh_use[ra_max] = bh;
- }
- }
- if ((bh = bh_use[ra_ptr++]) == NULL)
- goto next;
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- /* read error, skip block & hope for the best */
- ocfs2_error(dir->i_sb, "reading directory %llu, "
- "offset %lu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- block);
- brelse(bh);
- goto next;
- }
- i = ocfs2_search_dirblock(bh, dir, name, namelen,
- block << sb->s_blocksize_bits,
- res_dir);
- if (i == 1) {
- OCFS2_I(dir)->ip_dir_start_lookup = block;
- ret = bh;
- goto cleanup_and_exit;
- } else {
- brelse(bh);
- if (i < 0)
- goto cleanup_and_exit;
- }
- next:
- if (++block >= nblocks)
- block = 0;
- } while (block != start);
-
- /*
- * If the directory has grown while we were searching, then
- * search the last part of the directory before giving up.
- */
- block = nblocks;
- nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
- if (block < nblocks) {
- start = 0;
- goto restart;
- }
-
-cleanup_and_exit:
- /* Clean up the read-ahead blocks */
- for (; ra_ptr < ra_max; ra_ptr++)
- brelse(bh_use[ra_ptr]);
-
- mlog_exit_ptr(ret);
- return ret;
-}
-
static int ocfs2_blkno_stringify(u64 blkno, char *name)
{
int status, namelen;
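
Both the lookup and rename paths above now resolve names through ocfs2_lookup_ino_from_name(); a minimal sketch of that call shape, with the error mapping taken from the rename hunk (the wrapper name is illustrative):

/* Sketch, ocfs2 context: resolve a dentry's name in 'dir' to a block number. */
static int example_name_to_blkno(struct inode *dir, struct dentry *dentry,
				 u64 *blkno)
{
	int status;

	status = ocfs2_lookup_ino_from_name(dir, dentry->d_name.name,
					    dentry->d_name.len, blkno);
	if (status)
		return -ENOENT;

	return 0;
}
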
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
index 0975c7b..688aef6 100644
--- a/fs/ocfs2/namei.h
+++ b/fs/ocfs2/namei.h
@@ -30,29 +30,10 @@ extern const struct inode_operations ocfs2_dir_iops;
struct dentry *ocfs2_get_parent(struct dentry *child);
-int ocfs2_check_dir_entry (struct inode *dir,
- struct ocfs2_dir_entry *de,
- struct buffer_head *bh,
- unsigned long offset);
-struct buffer_head *ocfs2_find_entry(const char *name,
- int namelen,
- struct inode *dir,
- struct ocfs2_dir_entry **res_dir);
int ocfs2_orphan_del(struct ocfs2_super *osb,
handle_t *handle,
struct inode *orphan_dir_inode,
struct inode *inode,
struct buffer_head *orphan_dir_bh);
-static inline int ocfs2_match(int len,
- const char * const name,
- struct ocfs2_dir_entry *de)
-{
- if (len != de->name_len)
- return 0;
- if (!de->inode)
- return 0;
- return !memcmp(name, de->name, len);
-}
-
#endif /* OCFS2_NAMEI_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 5cc90a4..60a23e1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -319,6 +319,13 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
return 0;
}
+static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb)
+{
+ if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
+ return 1;
+ return 0;
+}
+
/* set / clear functions because cluster events can make these happen
* in parallel so we want the transitions to be atomic. this also
* means that any future flags osb_flags must be protected by spinlock
@@ -494,16 +501,16 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
/*
* Find the 1st page index which covers the given clusters.
*/
-static inline unsigned long ocfs2_align_clusters_to_page_index(struct super_block *sb,
+static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
u32 clusters)
{
unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
- unsigned long index = clusters;
+ pgoff_t index = clusters;
if (PAGE_CACHE_SHIFT > cbits) {
- index = clusters >> (PAGE_CACHE_SHIFT - cbits);
+ index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits);
} else if (PAGE_CACHE_SHIFT < cbits) {
- index = clusters << (cbits - PAGE_CACHE_SHIFT);
+ index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT);
}
return index;
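
The pgoff_t change above widens the shift so that large cluster counts cannot wrap in 32-bit arithmetic when clusters are bigger than pages; a small userspace sketch with illustrative sizes (4 KB pages, 1 MB clusters) shows the effect of the cast:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4 KB pages */
	unsigned int cluster_bits = 20;		/* 1 MB clusters */
	uint32_t clusters = 1U << 24;		/* 16 TB worth of 1 MB clusters */

	/* Without the widening cast this shift would wrap in 32 bits. */
	uint64_t index = (uint64_t)clusters << (cluster_bits - page_shift);

	printf("first page index: %llu\n", (unsigned long long)index);
	return 0;
}
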
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 82f8a75..6ef8767 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -87,7 +87,8 @@
#define OCFS2_FEATURE_COMPAT_SUPP OCFS2_FEATURE_COMPAT_BACKUP_SB
#define OCFS2_FEATURE_INCOMPAT_SUPP (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
- | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
+ | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
+ | OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
#define OCFS2_FEATURE_RO_COMPAT_SUPP OCFS2_FEATURE_RO_COMPAT_UNWRITTEN
/*
@@ -111,6 +112,20 @@
#define OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC 0x0010
/*
+ * Tunefs sets this incompat flag before starting an operation which
+ * would require cleanup on abort. This is done to protect users from
+ * inadvertently mounting the fs after an aborted run without
+ * fsck-ing.
+ *
+ * s_tunefs_flags on the super block describes precisely which
+ * operations were in progress.
+ */
+#define OCFS2_FEATURE_INCOMPAT_TUNEFS_INPROG 0x0020
+
+/* Support for data packed into inode blocks */
+#define OCFS2_FEATURE_INCOMPAT_INLINE_DATA 0x0040
+
+/*
* backup superblock flag is used to indicate that this volume
* has backup superblocks.
*/
@@ -130,6 +145,11 @@
#define OCFS2_MAX_BACKUP_SUPERBLOCKS 6
/*
+ * Flags on ocfs2_super_block.s_tunefs_flags
+ */
+#define OCFS2_TUNEFS_INPROG_REMOVE_SLOT 0x0001 /* Removing slots */
+
+/*
* Flags on ocfs2_dinode.i_flags
*/
#define OCFS2_VALID_FL (0x00000001) /* Inode is valid */
@@ -146,6 +166,17 @@
#define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */
#define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */
+/*
+ * Flags on ocfs2_dinode.i_dyn_features
+ *
+ * These can change much more often than i_flags. When adding flags,
+ * keep in mind that i_dyn_features is only 16 bits wide.
+ */
+#define OCFS2_INLINE_DATA_FL (0x0001) /* Data stored in inode block */
+#define OCFS2_HAS_XATTR_FL (0x0002)
+#define OCFS2_INLINE_XATTR_FL (0x0004)
+#define OCFS2_INDEXED_DIR_FL (0x0008)
+
/* Inode attributes, keep in sync with EXT2 */
#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
#define OCFS2_UNRM_FL (0x00000002) /* Undelete */
@@ -447,8 +478,8 @@ struct ocfs2_super_block {
__le32 s_clustersize_bits; /* Clustersize for this fs */
/*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts
before tunefs required */
- __le16 s_reserved1;
- __le32 s_reserved2;
+ __le16 s_tunefs_flag;
+ __le32 s_reserved1;
__le64 s_first_cluster_group; /* Block offset of 1st cluster
* group header */
/*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */
@@ -471,6 +502,19 @@ struct ocfs2_local_alloc
};
/*
+ * Data-in-inode header. This is only used if i_dyn_features has
+ * OCFS2_INLINE_DATA_FL set.
+ */
+struct ocfs2_inline_data
+{
+/*00*/ __le16 id_count; /* Number of bytes that can be used
+ * for data, starting at id_data */
+ __le16 id_reserved0;
+ __le32 id_reserved1;
+ __u8 id_data[0]; /* Start of user data */
+};
+
+/*
* On disk inode for OCFS2
*/
struct ocfs2_dinode {
@@ -502,7 +546,7 @@ struct ocfs2_dinode {
__le32 i_attr;
__le16 i_orphaned_slot; /* Only valid when OCFS2_ORPHANED_FL
was set in i_flags */
- __le16 i_reserved1;
+ __le16 i_dyn_features;
/*70*/ __le64 i_reserved2[8];
/*B8*/ union {
__le64 i_pad1; /* Generic way to refer to this
@@ -528,6 +572,7 @@ struct ocfs2_dinode {
struct ocfs2_chain_list i_chain;
struct ocfs2_extent_list i_list;
struct ocfs2_truncate_log i_dealloc;
+ struct ocfs2_inline_data i_data;
__u8 i_symlink[0];
} id2;
/* Actual on-disk size is one block */
@@ -577,6 +622,12 @@ static inline int ocfs2_fast_symlink_chars(struct super_block *sb)
offsetof(struct ocfs2_dinode, id2.i_symlink);
}
+static inline int ocfs2_max_inline_data(struct super_block *sb)
+{
+ return sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+}
+
static inline int ocfs2_extent_recs_per_inode(struct super_block *sb)
{
int size;
@@ -656,6 +707,11 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
}
+static inline int ocfs2_max_inline_data(int blocksize)
+{
+ return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+}
+
static inline int ocfs2_extent_recs_per_inode(int blocksize)
{
int size;
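
The new i_dyn_features flag and ocfs2_max_inline_data() above are meant to be used together; a minimal sketch of that pairing (ocfs2 context, wrapper name illustrative):

/* How many bytes of file data fit directly in this inode's dinode block? */
static int example_inline_capacity(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		return 0;	/* extent-based inode, nothing is stored inline */

	return ocfs2_max_inline_data(inode->i_sb);
}
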
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index d9c5c9f..8f09f52 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1486,21 +1486,21 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode,
* contig. allocation, set to '1' to indicate we can deal with extents
* of any size.
*/
-int ocfs2_claim_clusters(struct ocfs2_super *osb,
- handle_t *handle,
- struct ocfs2_alloc_context *ac,
- u32 min_clusters,
- u32 *cluster_start,
- u32 *num_clusters)
+int __ocfs2_claim_clusters(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_clusters,
+ u32 max_clusters,
+ u32 *cluster_start,
+ u32 *num_clusters)
{
int status;
- unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
+ unsigned int bits_wanted = max_clusters;
u64 bg_blkno = 0;
u16 bg_bit_off;
mlog_entry_void();
- BUG_ON(!ac);
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
@@ -1557,6 +1557,19 @@ bail:
return status;
}
+int ocfs2_claim_clusters(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_clusters,
+ u32 *cluster_start,
+ u32 *num_clusters)
+{
+ unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
+
+ return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
+ bits_wanted, cluster_start, num_clusters);
+}
+
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index f212dc0..cafe937 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -85,6 +85,17 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
u32 min_clusters,
u32 *cluster_start,
u32 *num_clusters);
+/*
+ * Use this variant of ocfs2_claim_clusters to specify a maximum
+ * number of clusters smaller than the allocation reserved.
+ */
+int __ocfs2_claim_clusters(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_clusters,
+ u32 max_clusters,
+ u32 *cluster_start,
+ u32 *num_clusters);
int ocfs2_free_suballoc_bits(handle_t *handle,
struct inode *alloc_inode,
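
A minimal sketch of the intended use of the new __ocfs2_claim_clusters() variant, capping an allocation at a single cluster regardless of what the context reserved (the wrapper name is illustrative):

static int example_claim_one_cluster(struct ocfs2_super *osb,
				     handle_t *handle,
				     struct ocfs2_alloc_context *ac,
				     u32 *cluster_start, u32 *num_clusters)
{
	return __ocfs2_claim_clusters(osb, handle, ac,
				      1,	/* min_clusters */
				      1,	/* max_clusters */
				      cluster_start, num_clusters);
}
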
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 3a5a1ed..0e2a1b4 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -39,6 +39,7 @@
#include <linux/parser.h>
#include <linux/crc32.h>
#include <linux/debugfs.h>
+#include <linux/mount.h>
#include <cluster/nodemanager.h>
@@ -81,9 +82,17 @@ static struct dentry *ocfs2_debugfs_root = NULL;
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
+struct mount_options
+{
+ unsigned long mount_opt;
+ unsigned int atime_quantum;
+ signed short slot;
+};
+
static int ocfs2_parse_options(struct super_block *sb, char *options,
- unsigned long *mount_opt, s16 *slot,
+ struct mount_options *mopt,
int is_remount);
+static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
@@ -98,7 +107,7 @@ static int ocfs2_sync_fs(struct super_block *sb, int wait);
static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb);
static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb);
-static int ocfs2_release_system_inodes(struct ocfs2_super *osb);
+static void ocfs2_release_system_inodes(struct ocfs2_super *osb);
static int ocfs2_fill_local_node_info(struct ocfs2_super *osb);
static int ocfs2_check_volume(struct ocfs2_super *osb);
static int ocfs2_verify_volume(struct ocfs2_dinode *di,
@@ -126,6 +135,7 @@ static const struct super_operations ocfs2_sops = {
.write_super = ocfs2_write_super,
.put_super = ocfs2_put_super,
.remount_fs = ocfs2_remount,
+ .show_options = ocfs2_show_options,
};
enum {
@@ -170,7 +180,7 @@ static void ocfs2_write_super(struct super_block *sb)
static int ocfs2_sync_fs(struct super_block *sb, int wait)
{
- int status = 0;
+ int status;
tid_t target;
struct ocfs2_super *osb = OCFS2_SB(sb);
@@ -268,9 +278,9 @@ bail:
return status;
}
-static int ocfs2_release_system_inodes(struct ocfs2_super *osb)
+static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
{
- int status = 0, i;
+ int i;
struct inode *inode;
mlog_entry_void();
@@ -295,8 +305,7 @@ static int ocfs2_release_system_inodes(struct ocfs2_super *osb)
osb->root_inode = NULL;
}
- mlog_exit(status);
- return status;
+ mlog_exit(0);
}
/* We're allocating fs objects, use GFP_NOFS */
@@ -316,63 +325,74 @@ static void ocfs2_destroy_inode(struct inode *inode)
kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
}
-/* From xfs_super.c:xfs_max_file_offset
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.
- */
-unsigned long long ocfs2_max_file_offset(unsigned int blockshift)
+static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
+ unsigned int cbits)
{
- unsigned int pagefactor = 1;
- unsigned int bitshift = BITS_PER_LONG - 1;
-
- /* Figure out maximum filesize, on Linux this can depend on
- * the filesystem blocksize (on 32 bit platforms).
- * __block_prepare_write does this in an [unsigned] long...
- * page->index << (PAGE_CACHE_SHIFT - bbits)
- * So, for page sized blocks (4K on 32 bit platforms),
- * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
- * but for smaller blocksizes it is less (bbits = log2 bsize).
- * Note1: get_block_t takes a long (implicit cast from above)
- * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
- * can optionally convert the [unsigned] long from above into
- * an [unsigned] long long.
+ unsigned int bytes = 1 << cbits;
+ unsigned int trim = bytes;
+ unsigned int bitshift = 32;
+
+ /*
+ * i_size and all block offsets in ocfs2 are always 64 bits
+ * wide. i_clusters is 32 bits, in cluster-sized units. So on
+ * 64 bit platforms, cluster size will be the limiting factor.
*/
#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
BUILD_BUG_ON(sizeof(sector_t) != 8);
- pagefactor = PAGE_CACHE_SIZE;
- bitshift = BITS_PER_LONG;
+ /*
+ * We might be limited by page cache size.
+ */
+ if (bytes > PAGE_CACHE_SIZE) {
+ bytes = PAGE_CACHE_SIZE;
+ trim = 1;
+ /*
+ * Shift by 31 here so that we don't get larger than
+ * MAX_LFS_FILESIZE
+ */
+ bitshift = 31;
+ }
# else
- pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+ /*
+ * We are limited by the size of sector_t. Use block size, as
+ * that's what we expose to the VFS.
+ */
+ bytes = 1 << bbits;
+ trim = 1;
+ bitshift = 31;
# endif
#endif
- return (((unsigned long long)pagefactor) << bitshift) - 1;
+ /*
+ * Trim by a whole cluster when we can actually approach the
+ * on-disk limits. Otherwise we can overflow i_clusters when
+ * an extent start is at the max offset.
+ */
+ return (((unsigned long long)bytes) << bitshift) - trim;
}
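
A small userspace mirror of the 64-bit branch of the rewritten calculation, with an illustrative cluster size (1 MB), makes the resulting limit concrete:

#include <stdio.h>

int main(void)
{
	unsigned int cbits = 20;			/* 1 MB clusters */
	unsigned long long bytes = 1ULL << cbits;	/* trim = one cluster */
	unsigned int bitshift = 32;			/* 32-bit i_clusters */

	/* (1 MB << 32) - 1 MB: 4 PiB minus one cluster */
	printf("max file offset: %llu\n", (bytes << bitshift) - bytes);
	return 0;
}
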
static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
{
int incompat_features;
int ret = 0;
- unsigned long parsed_options;
- s16 slot;
+ struct mount_options parsed_options;
struct ocfs2_super *osb = OCFS2_SB(sb);
- if (!ocfs2_parse_options(sb, data, &parsed_options, &slot, 1)) {
+ if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) {
ret = -EINVAL;
goto out;
}
if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
- (parsed_options & OCFS2_MOUNT_HB_LOCAL)) {
+ (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
goto out;
}
if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
- (parsed_options & OCFS2_MOUNT_DATA_WRITEBACK)) {
+ (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change data mode on remount\n");
goto out;
@@ -423,7 +443,9 @@ unlock_osb:
/* Only save off the new mount options in case of a successful
* remount. */
- osb->s_mount_opt = parsed_options;
+ osb->s_mount_opt = parsed_options.mount_opt;
+ osb->s_atime_quantum = parsed_options.atime_quantum;
+ osb->preferred_slot = parsed_options.slot;
}
out:
return ret;
@@ -433,7 +455,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
struct buffer_head **bh,
int *sector_size)
{
- int status = 0, tmpstat;
+ int status, tmpstat;
struct ocfs1_vol_disk_hdr *hdr;
struct ocfs2_dinode *di;
int blksize;
@@ -535,8 +557,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
{
struct dentry *root;
int status, sector_size;
- unsigned long parsed_opt;
- s16 slot;
+ struct mount_options parsed_options;
struct inode *inode = NULL;
struct ocfs2_super *osb = NULL;
struct buffer_head *bh = NULL;
@@ -544,14 +565,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
mlog_entry("%p, %p, %i", sb, data, silent);
- if (!ocfs2_parse_options(sb, data, &parsed_opt, &slot, 0)) {
+ if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
goto read_super_error;
}
/* for now we only have one cluster/node, make sure we see it
* in the heartbeat universe */
- if (parsed_opt & OCFS2_MOUNT_HB_LOCAL) {
+ if (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL) {
if (!o2hb_check_local_node_heartbeating()) {
status = -EINVAL;
goto read_super_error;
@@ -573,8 +594,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
}
brelse(bh);
bh = NULL;
- osb->s_mount_opt = parsed_opt;
- osb->preferred_slot = slot;
+ osb->s_mount_opt = parsed_options.mount_opt;
+ osb->s_atime_quantum = parsed_options.atime_quantum;
+ osb->preferred_slot = parsed_options.slot;
sb->s_magic = OCFS2_SUPER_MAGIC;
@@ -716,8 +738,7 @@ static struct file_system_type ocfs2_fs_type = {
static int ocfs2_parse_options(struct super_block *sb,
char *options,
- unsigned long *mount_opt,
- s16 *slot,
+ struct mount_options *mopt,
int is_remount)
{
int status;
@@ -726,8 +747,9 @@ static int ocfs2_parse_options(struct super_block *sb,
mlog_entry("remount: %d, options: \"%s\"\n", is_remount,
options ? options : "(none)");
- *mount_opt = 0;
- *slot = OCFS2_INVALID_SLOT;
+ mopt->mount_opt = 0;
+ mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+ mopt->slot = OCFS2_INVALID_SLOT;
if (!options) {
status = 1;
@@ -737,7 +759,6 @@ static int ocfs2_parse_options(struct super_block *sb,
while ((p = strsep(&options, ",")) != NULL) {
int token, option;
substring_t args[MAX_OPT_ARGS];
- struct ocfs2_super * osb = OCFS2_SB(sb);
if (!*p)
continue;
@@ -745,10 +766,10 @@ static int ocfs2_parse_options(struct super_block *sb,
token = match_token(p, tokens, args);
switch (token) {
case Opt_hb_local:
- *mount_opt |= OCFS2_MOUNT_HB_LOCAL;
+ mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL;
break;
case Opt_hb_none:
- *mount_opt &= ~OCFS2_MOUNT_HB_LOCAL;
+ mopt->mount_opt &= ~OCFS2_MOUNT_HB_LOCAL;
break;
case Opt_barrier:
if (match_int(&args[0], &option)) {
@@ -756,27 +777,27 @@ static int ocfs2_parse_options(struct super_block *sb,
goto bail;
}
if (option)
- *mount_opt |= OCFS2_MOUNT_BARRIER;
+ mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
else
- *mount_opt &= ~OCFS2_MOUNT_BARRIER;
+ mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
break;
case Opt_intr:
- *mount_opt &= ~OCFS2_MOUNT_NOINTR;
+ mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
break;
case Opt_nointr:
- *mount_opt |= OCFS2_MOUNT_NOINTR;
+ mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
break;
case Opt_err_panic:
- *mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
+ mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
break;
case Opt_err_ro:
- *mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+ mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
break;
case Opt_data_ordered:
- *mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
+ mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_data_writeback:
- *mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
+ mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_atime_quantum:
if (match_int(&args[0], &option)) {
@@ -784,9 +805,7 @@ static int ocfs2_parse_options(struct super_block *sb,
goto bail;
}
if (option >= 0)
- osb->s_atime_quantum = option;
- else
- osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+ mopt->atime_quantum = option;
break;
case Opt_slot:
option = 0;
@@ -795,7 +814,7 @@ static int ocfs2_parse_options(struct super_block *sb,
goto bail;
}
if (option)
- *slot = (s16)option;
+ mopt->slot = (s16)option;
break;
default:
mlog(ML_ERROR,
@@ -813,6 +832,41 @@ bail:
return status;
}
+static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+{
+ struct ocfs2_super *osb = OCFS2_SB(mnt->mnt_sb);
+ unsigned long opts = osb->s_mount_opt;
+
+ if (opts & OCFS2_MOUNT_HB_LOCAL)
+ seq_printf(s, ",_netdev,heartbeat=local");
+ else
+ seq_printf(s, ",heartbeat=none");
+
+ if (opts & OCFS2_MOUNT_NOINTR)
+ seq_printf(s, ",nointr");
+
+ if (opts & OCFS2_MOUNT_DATA_WRITEBACK)
+ seq_printf(s, ",data=writeback");
+ else
+ seq_printf(s, ",data=ordered");
+
+ if (opts & OCFS2_MOUNT_BARRIER)
+ seq_printf(s, ",barrier=1");
+
+ if (opts & OCFS2_MOUNT_ERRORS_PANIC)
+ seq_printf(s, ",errors=panic");
+ else
+ seq_printf(s, ",errors=remount-ro");
+
+ if (osb->preferred_slot != OCFS2_INVALID_SLOT)
+ seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
+
+ if (osb->s_atime_quantum != OCFS2_DEFAULT_ATIME_QUANTUM)
+ seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
+
+ return 0;
+}
+
static int __init ocfs2_init(void)
{
int status;
@@ -984,7 +1038,7 @@ static int ocfs2_initialize_mem_caches(void)
0,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- ocfs2_inode_init_once, NULL);
+ ocfs2_inode_init_once);
if (!ocfs2_inode_cachep)
return -ENOMEM;
@@ -1192,12 +1246,13 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
tmp = ocfs2_request_umount_vote(osb);
if (tmp < 0)
mlog_errno(tmp);
+ }
- if (osb->slot_num != OCFS2_INVALID_SLOT)
- ocfs2_put_slot(osb);
+ if (osb->slot_num != OCFS2_INVALID_SLOT)
+ ocfs2_put_slot(osb);
+ if (osb->dlm)
ocfs2_super_unlock(osb, 1);
- }
ocfs2_release_system_inodes(osb);
@@ -1258,9 +1313,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
struct buffer_head *bh,
int sector_size)
{
- int status = 0;
- int i;
- struct ocfs2_dinode *di = NULL;
+ int status;
+ int i, cbits, bbits;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
struct inode *inode = NULL;
struct buffer_head *bitmap_bh = NULL;
struct ocfs2_journal *journal;
@@ -1279,9 +1334,12 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_fs_info = osb;
sb->s_op = &ocfs2_sops;
sb->s_export_op = &ocfs2_export_ops;
+ sb->s_time_gran = 1;
sb->s_flags |= MS_NOATIME;
/* this is needed to support O_LARGEFILE */
- sb->s_maxbytes = ocfs2_max_file_offset(sb->s_blocksize_bits);
+ cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
+ bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
+ sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
osb->sb = sb;
/* Save off for ocfs2_rw_direct */
@@ -1341,8 +1399,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
goto bail;
}
- di = (struct ocfs2_dinode *)bh->b_data;
-
osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots);
if (osb->max_slots > OCFS2_MAX_SLOTS || osb->max_slots == 0) {
mlog(ML_ERROR, "Invalid number of node slots (%u)\n",
@@ -1578,7 +1634,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
static int ocfs2_check_volume(struct ocfs2_super *osb)
{
- int status = 0;
+ int status;
int dirty;
int local;
struct ocfs2_dinode *local_alloc = NULL; /* only used if we
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
index 3b9cb3d..783f527 100644
--- a/fs/ocfs2/super.h
+++ b/fs/ocfs2/super.h
@@ -45,6 +45,4 @@ void __ocfs2_abort(struct super_block *sb,
#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args)
-unsigned long long ocfs2_max_file_offset(unsigned int blockshift);
-
#endif /* OCFS2_SUPER_H */
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 5df6e35..fd2e846 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -100,17 +100,14 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
char namebuf[40];
struct inode *inode = NULL;
u64 blkno;
- struct buffer_head *dirent_bh = NULL;
- struct ocfs2_dir_entry *de = NULL;
int status = 0;
ocfs2_sprintf_system_inode_name(namebuf,
sizeof(namebuf),
type, slot);
- status = ocfs2_find_files_on_disk(namebuf, strlen(namebuf),
- &blkno, osb->sys_root_inode,
- &dirent_bh, &de);
+ status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
+ strlen(namebuf), &blkno);
if (status < 0) {
goto bail;
}
@@ -122,8 +119,7 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
goto bail;
}
bail:
- if (dirent_bh)
- brelse(dirent_bh);
+
return inode;
}
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 39814b9..4da8851 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -548,7 +548,7 @@ int __init init_ocfs2_uptodate_cache(void)
{
ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
sizeof(struct ocfs2_meta_cache_item),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!ocfs2_uptodate_cachep)
return -ENOMEM;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 66a13ee..c053585 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -66,7 +66,7 @@ struct ocfs2_vote_msg
{
struct ocfs2_msg_hdr v_hdr;
__be32 v_reserved1;
-};
+} __attribute__ ((packed));
/* Responses are given these values to maintain backwards
* compatibility with older ocfs2 versions */
@@ -78,7 +78,7 @@ struct ocfs2_response_msg
{
struct ocfs2_msg_hdr r_hdr;
__be32 r_response;
-};
+} __attribute__ ((packed));
struct ocfs2_vote_work {
struct list_head w_list;
diff --git a/fs/open.c b/fs/open.c
index be6a457..1d9e5e9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
+#include <linux/falloc.h>
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -255,24 +256,26 @@ static long do_sys_truncate(const char __user * path, loff_t length)
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto dput_and_out;
- /*
- * Make sure that there are no leases.
- */
- error = break_lease(inode, FMODE_WRITE);
+ error = get_write_access(inode);
if (error)
goto dput_and_out;
- error = get_write_access(inode);
+ /*
+ * Make sure that there are no leases. get_write_access() protects
+ * against the truncate racing with a lease-granting setlease().
+ */
+ error = break_lease(inode, FMODE_WRITE);
if (error)
- goto dput_and_out;
+ goto put_write_and_out;
error = locks_verify_truncate(inode, NULL, length);
if (!error) {
DQUOT_INIT(inode);
error = do_truncate(nd.dentry, length, 0, NULL);
}
- put_write_access(inode);
+put_write_and_out:
+ put_write_access(inode);
dput_and_out:
path_release(&nd);
out:
@@ -352,6 +355,64 @@ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
}
#endif
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
+{
+ struct file *file;
+ struct inode *inode;
+ long ret = -EINVAL;
+
+ if (offset < 0 || len <= 0)
+ goto out;
+
+ /* Return error if mode is not supported */
+ ret = -EOPNOTSUPP;
+ if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+ goto out;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ if (!(file->f_mode & FMODE_WRITE))
+ goto out_fput;
+ /*
+ * Revalidate the write permissions, in case security policy has
+ * changed since the files were opened.
+ */
+ ret = security_file_permission(file, MAY_WRITE);
+ if (ret)
+ goto out_fput;
+
+ inode = file->f_path.dentry->d_inode;
+
+ ret = -ESPIPE;
+ if (S_ISFIFO(inode->i_mode))
+ goto out_fput;
+
+ ret = -ENODEV;
+ /*
+ * Let individual file system decide if it supports preallocation
+ * for directories or not.
+ */
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ goto out_fput;
+
+ ret = -EFBIG;
+ /* Check for wrap through zero too */
+ if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
+ goto out_fput;
+
+ if (inode->i_op && inode->i_op->fallocate)
+ ret = inode->i_op->fallocate(inode, mode, offset, len);
+ else
+ ret = -EOPNOTSUPP;
+
+out_fput:
+ fput(file);
+out:
+ return ret;
+}
+
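
A minimal userspace sketch of exercising the new syscall directly; it assumes the kernel headers export __NR_fallocate, takes the FALLOC_FL_KEEP_SIZE value (0x01) from <linux/falloc.h>, and assumes a 64-bit ABI so the loff_t arguments pass through syscall(2) unmodified:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE 0x01	/* value from <linux/falloc.h> */
#endif

int main(void)
{
	int fd = open("prealloc.dat", O_CREAT | O_WRONLY, 0644);
	if (fd < 0)
		return 1;

	/* Reserve 16 MB of blocks without changing the file size. */
	if (syscall(__NR_fallocate, fd, FALLOC_FL_KEEP_SIZE,
		    (long long)0, (long long)16 * 1024 * 1024) != 0)
		perror("fallocate");

	close(fd);
	return 0;
}
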
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
* We do this by temporarily clearing all FS-related capabilities and
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index e623973..dd86be2 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -431,7 +431,7 @@ static int __init init_openprom_fs(void)
0,
(SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
- op_inode_init_once, NULL);
+ op_inode_init_once);
if (!op_inode_cachep)
return -ENOMEM;
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 98e0b85..722e12e 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -372,20 +372,21 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
{
struct hd_struct *p;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return;
- memset(p, 0, sizeof(*p));
p->start_sect = start;
p->nr_sects = len;
p->partno = part;
p->policy = disk->policy;
- if (isdigit(disk->kobj.name[strlen(disk->kobj.name)-1]))
- snprintf(p->kobj.name,KOBJ_NAME_LEN,"%sp%d",disk->kobj.name,part);
+ if (isdigit(disk->kobj.k_name[strlen(disk->kobj.k_name)-1]))
+ kobject_set_name(&p->kobj, "%sp%d",
+ kobject_name(&disk->kobj), part);
else
- snprintf(p->kobj.name,KOBJ_NAME_LEN,"%s%d",disk->kobj.name,part);
+ kobject_set_name(&p->kobj, "%s%d",
+ kobject_name(&disk->kobj),part);
p->kobj.parent = &disk->kobj;
p->kobj.ktype = &ktype_part;
kobject_init(&p->kobj);
@@ -478,9 +479,9 @@ void register_disk(struct gendisk *disk)
struct hd_struct *p;
int err;
- strlcpy(disk->kobj.name,disk->disk_name,KOBJ_NAME_LEN);
+ kobject_set_name(&disk->kobj, "%s", disk->disk_name);
/* ewww... some of these buggers have / in name... */
- s = strchr(disk->kobj.name, '/');
+ s = strchr(disk->kobj.k_name, '/');
if (s)
*s = '!';
if ((err = kobject_add(&disk->kobj)))
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 4ccec4c..5567ec0 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -203,6 +203,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
Sector sect;
struct solaris_x86_vtoc *v;
int i;
+ short max_nparts;
v = (struct solaris_x86_vtoc *)read_dev_sector(bdev, offset+1, &sect);
if (!v)
@@ -218,7 +219,9 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
put_dev_sector(sect);
return;
}
- for (i=0; i<SOLARIS_X86_NUMSLICE && state->next<state->limit; i++) {
+ /* Gracefully handle the previous VTOC layout, which had only 8 entries */
+ max_nparts = le16_to_cpu (v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8;
+ for (i=0; i<max_nparts && state->next<state->limit; i++) {
struct solaris_x86_slice *s = &v->v_slice[i];
if (s->s_size == 0)
continue;
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index 123f8b4..794118d 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -19,34 +19,47 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
Sector sect;
struct sun_disklabel {
unsigned char info[128]; /* Informative text string */
- unsigned char spare0[14];
- struct sun_info {
- unsigned char spare1;
- unsigned char id;
- unsigned char spare2;
- unsigned char flags;
- } infos[8];
- unsigned char spare[246]; /* Boot information etc. */
+ struct sun_vtoc {
+ __be32 version; /* Layout version */
+ char volume[8]; /* Volume name */
+ __be16 nparts; /* Number of partitions */
+ struct sun_info { /* Partition hdrs, sec 2 */
+ __be16 id;
+ __be16 flags;
+ } infos[8];
+ __be16 padding; /* Alignment padding */
+ __be32 bootinfo[3]; /* Info needed by mboot */
+ __be32 sanity; /* To verify vtoc sanity */
+ __be32 reserved[10]; /* Free space */
+ __be32 timestamp[8]; /* Partition timestamp */
+ } vtoc;
+ __be32 write_reinstruct; /* sectors to skip, writes */
+ __be32 read_reinstruct; /* sectors to skip, reads */
+ unsigned char spare[148]; /* Padding */
__be16 rspeed; /* Disk rotational speed */
__be16 pcylcount; /* Physical cylinder count */
__be16 sparecyl; /* extra sects per cylinder */
- unsigned char spare2[4]; /* More magic... */
+ __be16 obs1; /* gap1 */
+ __be16 obs2; /* gap2 */
__be16 ilfact; /* Interleave factor */
__be16 ncyl; /* Data cylinder count */
__be16 nacyl; /* Alt. cylinder count */
__be16 ntrks; /* Tracks per cylinder */
__be16 nsect; /* Sectors per track */
- unsigned char spare3[4]; /* Even more magic... */
+ __be16 obs3; /* bhead - Label head offset */
+ __be16 obs4; /* ppart - Physical Partition */
struct sun_partition {
__be32 start_cylinder;
__be32 num_sectors;
} partitions[8];
__be16 magic; /* Magic number */
__be16 csum; /* Label xor'd checksum */
- } * label;
+ } * label;
struct sun_partition *p;
unsigned long spc;
char b[BDEVNAME_SIZE];
+ int use_vtoc;
+ int nparts;
label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
if (!label)
@@ -70,9 +83,22 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
return 0;
}
- /* All Sun disks have 8 partition entries */
+ /* Check to see if we can use the VTOC table */
+ use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) &&
+ (be32_to_cpu(label->vtoc.version) == 1) &&
+ (be16_to_cpu(label->vtoc.nparts) <= 8));
+
+ /* Use 8 partition entries if not specified in validated VTOC */
+ nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8;
+
+ /*
+ * So that old Linux-Sun partitions continue to work,
+ * allow the VTOC to be used under the additional condition ...
+ */
+ use_vtoc = use_vtoc || !(label->vtoc.sanity |
+ label->vtoc.version | label->vtoc.nparts);
spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
- for (i = 0; i < 8; i++, p++) {
+ for (i = 0; i < nparts; i++, p++) {
unsigned long st_sector;
unsigned int num_sectors;
@@ -81,10 +107,12 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
if (num_sectors) {
put_partition(state, slot, st_sector, num_sectors);
state->parts[slot].flags = 0;
- if (label->infos[i].id == LINUX_RAID_PARTITION)
- state->parts[slot].flags |= ADDPART_FLAG_RAID;
- if (label->infos[i].id == SUN_WHOLE_DISK)
- state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+ if (use_vtoc) {
+ if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags |= ADDPART_FLAG_RAID;
+ else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)
+ state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+ }
}
slot++;
}
diff --git a/fs/partitions/sun.h b/fs/partitions/sun.h
index b1b19fd..7f864d1 100644
--- a/fs/partitions/sun.h
+++ b/fs/partitions/sun.h
@@ -3,5 +3,6 @@
*/
#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_VTOC_SANITY 0x600DDEEE
int sun_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/pipe.c b/fs/pipe.c
index d007830..6b3d91a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -255,7 +255,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
/**
* generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @pipe: the pipe that the buffer belongs to
+ * @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm
*
* Description:
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index bce38e3..ebaba02 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -11,6 +11,7 @@ proc-y += inode.o root.o base.o generic.o array.o \
proc_tty.o proc_misc.o
proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o
+proc-$(CONFIG_NET) += proc_net.o
proc-$(CONFIG_PROC_KCORE) += kcore.o
proc-$(CONFIG_PROC_VMCORE) += vmcore.o
proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 965625a..ee4814d 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -320,7 +320,21 @@ int proc_pid_status(struct task_struct *task, char *buffer)
return buffer - orig;
}
-static clock_t task_utime(struct task_struct *p)
+/*
+ * Use precise platform statistics if available:
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+static cputime_t task_utime(struct task_struct *p)
+{
+ return p->utime;
+}
+
+static cputime_t task_stime(struct task_struct *p)
+{
+ return p->stime;
+}
+#else
+static cputime_t task_utime(struct task_struct *p)
{
clock_t utime = cputime_to_clock_t(p->utime),
total = utime + cputime_to_clock_t(p->stime);
@@ -337,10 +351,10 @@ static clock_t task_utime(struct task_struct *p)
}
utime = (clock_t)temp;
- return utime;
+ return clock_t_to_cputime(utime);
}
-static clock_t task_stime(struct task_struct *p)
+static cputime_t task_stime(struct task_struct *p)
{
clock_t stime;
@@ -349,10 +363,12 @@ static clock_t task_stime(struct task_struct *p)
* the total, to make sure the total observed by userspace
* grows monotonically - apps rely on that):
*/
- stime = nsec_to_clock_t(p->se.sum_exec_runtime) - task_utime(p);
+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+ cputime_to_clock_t(task_utime(p));
- return stime;
+ return clock_t_to_cputime(stime);
}
+#endif
static int do_task_stat(struct task_struct *task, char *buffer, int whole)
{
@@ -368,8 +384,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
unsigned long long start_time;
unsigned long cmin_flt = 0, cmaj_flt = 0;
unsigned long min_flt = 0, maj_flt = 0;
- cputime_t cutime, cstime;
- clock_t utime, stime;
+ cputime_t cutime, cstime, utime, stime;
unsigned long rsslim = 0;
char tcomm[sizeof(task->comm)];
unsigned long flags;
@@ -387,8 +402,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
sigemptyset(&sigign);
sigemptyset(&sigcatch);
- cutime = cstime = cputime_zero;
- utime = stime = 0;
+ cutime = cstime = utime = stime = cputime_zero;
rcu_read_lock();
if (lock_task_sighand(task, &flags)) {
@@ -414,15 +428,15 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
- utime += task_utime(t);
- stime += task_stime(t);
+ utime = cputime_add(utime, task_utime(t));
+ stime = cputime_add(stime, task_stime(t));
t = next_thread(t);
} while (t != task);
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
- utime += cputime_to_clock_t(sig->utime);
- stime += cputime_to_clock_t(sig->stime);
+ utime = cputime_add(utime, sig->utime);
+ stime = cputime_add(stime, sig->stime);
}
sid = signal_session(sig);
@@ -471,8 +485,8 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
cmin_flt,
maj_flt,
cmaj_flt,
- utime,
- stime,
+ cputime_to_clock_t(utime),
+ cputime_to_clock_t(stime),
cputime_to_clock_t(cutime),
cputime_to_clock_t(cstime),
priority,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 42cb4f5..19489b0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -72,6 +72,7 @@
#include <linux/poll.h>
#include <linux/nsproxy.h>
#include <linux/oom.h>
+#include <linux/elf.h>
#include "internal.h"
/* NOTE:
@@ -926,7 +927,7 @@ static const struct file_operations proc_pid_sched_operations = {
.read = seq_read,
.write = sched_write,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
#endif
@@ -1014,7 +1015,7 @@ static int task_dumpable(struct task_struct *task)
task_lock(task);
mm = task->mm;
if (mm)
- dumpable = mm->dumpable;
+ dumpable = get_dumpable(mm);
task_unlock(task);
if(dumpable == 1)
return 1;
@@ -1785,6 +1786,91 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
#endif
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF];
+ size_t len;
+ int ret;
+
+ if (!task)
+ return -ESRCH;
+
+ ret = 0;
+ mm = get_task_mm(task);
+ if (mm) {
+ len = snprintf(buffer, sizeof(buffer), "%08lx\n",
+ ((mm->flags & MMF_DUMP_FILTER_MASK) >>
+ MMF_DUMP_FILTER_SHIFT));
+ mmput(mm);
+ ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
+ }
+
+ put_task_struct(task);
+
+ return ret;
+}
+
+static ssize_t proc_coredump_filter_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF], *end;
+ unsigned int val;
+ int ret;
+ int i;
+ unsigned long mask;
+
+ ret = -EFAULT;
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ goto out_no_task;
+
+ ret = -EINVAL;
+ val = (unsigned int)simple_strtoul(buffer, &end, 0);
+ if (*end == '\n')
+ end++;
+ if (end - buffer == 0)
+ goto out_no_task;
+
+ ret = -ESRCH;
+ task = get_proc_task(file->f_dentry->d_inode);
+ if (!task)
+ goto out_no_task;
+
+ ret = end - buffer;
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_no_mm;
+
+ for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
+ if (val & mask)
+ set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+ else
+ clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+ }
+
+ mmput(mm);
+ out_no_mm:
+ put_task_struct(task);
+ out_no_task:
+ return ret;
+}
+
+static const struct file_operations proc_coredump_filter_operations = {
+ .read = proc_coredump_filter_read,
+ .write = proc_coredump_filter_write,
+};
+#endif
+
/*
* /proc/self:
*/
@@ -2005,6 +2091,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+ REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
+#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUGO, pid_io_accounting),
#endif
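The new coredump_filter entry above exposes the MMF_DUMP_FILTER bits of mm->flags as a hex bitmask that the write handler parses with simple_strtoul(). A minimal userspace sketch of how a process might use it follows; it is not part of this patch, and the meaning of the individual bits is an assumption here (only the hex-bitmask format is defined by the handler shown above).

/*
 * Hedged sketch, userspace side: adjust the current process's
 * core-dump filter.  Bit meanings are assumed, not taken from
 * this patch.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");

	if (!f) {
		perror("coredump_filter");
		return 1;
	}
	fprintf(f, "0x3\n");	/* assumed: anonymous private + shared mappings */
	fclose(f);
	return 0;
}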
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dd28e86..0e4d37c 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
+#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
@@ -112,14 +113,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
int __init proc_init_inodecache(void)
{
proc_inode_cachep = kmem_cache_create("proc_inode_cache",
sizeof(struct proc_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (proc_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -232,7 +233,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
- unsigned int rv = 0;
+ unsigned int rv = DEFAULT_POLLMASK;
unsigned int (*poll)(struct file *, struct poll_table_struct *);
spin_lock(&pde->pde_unload_lock);
@@ -386,6 +387,19 @@ static const struct file_operations proc_reg_file_ops = {
.release = proc_reg_release,
};
+#ifdef CONFIG_COMPAT
+static const struct file_operations proc_reg_file_ops_no_compat = {
+ .llseek = proc_reg_llseek,
+ .read = proc_reg_read,
+ .write = proc_reg_write,
+ .poll = proc_reg_poll,
+ .unlocked_ioctl = proc_reg_unlocked_ioctl,
+ .mmap = proc_reg_mmap,
+ .open = proc_reg_open,
+ .release = proc_reg_release,
+};
+#endif
+
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
struct proc_dir_entry *de)
{
@@ -413,8 +427,15 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
if (de->proc_iops)
inode->i_op = de->proc_iops;
if (de->proc_fops) {
- if (S_ISREG(inode->i_mode))
- inode->i_fop = &proc_reg_file_ops;
+ if (S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_COMPAT
+ if (!de->proc_fops->compat_ioctl)
+ inode->i_fop =
+ &proc_reg_file_ops_no_compat;
+ else
+#endif
+ inode->i_fop = &proc_reg_file_ops;
+ }
else
inode->i_fop = de->proc_fops;
}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index b215c35..1820eb2 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -16,6 +16,11 @@ extern int proc_sys_init(void);
#else
static inline void proc_sys_init(void) { }
#endif
+#ifdef CONFIG_NET
+extern int proc_net_init(void);
+#else
+static inline int proc_net_init(void) { return 0; }
+#endif
struct vmalloc_info {
unsigned long used;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index d24b8d4..bee251c 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -445,6 +445,11 @@ static int show_stat(struct seq_file *p, void *v)
cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
u64 sum = 0;
struct timespec boottime;
+ unsigned int *per_irq_sum;
+
+ per_irq_sum = kzalloc(sizeof(unsigned int)*NR_IRQS, GFP_KERNEL);
+ if (!per_irq_sum)
+ return -ENOMEM;
user = nice = system = idle = iowait =
irq = softirq = steal = cputime64_zero;
@@ -462,8 +467,11 @@ static int show_stat(struct seq_file *p, void *v)
irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
- for (j = 0 ; j < NR_IRQS ; j++)
- sum += kstat_cpu(i).irqs[j];
+ for (j = 0; j < NR_IRQS; j++) {
+ unsigned int temp = kstat_cpu(i).irqs[j];
+ sum += temp;
+ per_irq_sum[j] += temp;
+ }
}
seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -499,9 +507,10 @@ static int show_stat(struct seq_file *p, void *v)
}
seq_printf(p, "intr %llu", (unsigned long long)sum);
-#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
+#ifndef CONFIG_SMP
+ /* Touches too many cache lines on SMP setups */
for (i = 0; i < NR_IRQS; i++)
- seq_printf(p, " %u", kstat_irqs(i));
+ seq_printf(p, " %u", per_irq_sum[i]);
#endif
seq_printf(p,
@@ -516,6 +525,7 @@ static int show_stat(struct seq_file *p, void *v)
nr_running(),
nr_iowait());
+ kfree(per_irq_sum);
return 0;
}
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
new file mode 100644
index 0000000..2e91fb7
--- /dev/null
+++ b/fs/proc/proc_net.c
@@ -0,0 +1,200 @@
+/*
+ * linux/fs/proc/net.c
+ *
+ * Copyright (C) 2007
+ *
+ * Author: Eric Biederman <ebiederm@xmission.com>
+ *
+ * proc net directory handling functions
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/smp_lock.h>
+#include <linux/mount.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+
+#include "internal.h"
+
+
+struct proc_dir_entry *proc_net_create(struct net *net,
+ const char *name, mode_t mode, get_info_t *get_info)
+{
+ return create_proc_info_entry(name,mode, net->proc_net, get_info);
+}
+EXPORT_SYMBOL_GPL(proc_net_create);
+
+struct proc_dir_entry *proc_net_fops_create(struct net *net,
+ const char *name, mode_t mode, const struct file_operations *fops)
+{
+ struct proc_dir_entry *res;
+
+ res = create_proc_entry(name, mode, net->proc_net);
+ if (res)
+ res->proc_fops = fops;
+ return res;
+}
+EXPORT_SYMBOL_GPL(proc_net_fops_create);
+
+void proc_net_remove(struct net *net, const char *name)
+{
+ remove_proc_entry(name, net->proc_net);
+}
+EXPORT_SYMBOL_GPL(proc_net_remove);
+
+struct net *get_proc_net(const struct inode *inode)
+{
+ return maybe_get_net(PDE_NET(PDE(inode)));
+}
+EXPORT_SYMBOL_GPL(get_proc_net);
+
+static struct proc_dir_entry *proc_net_shadow;
+
+static struct dentry *proc_net_shadow_dentry(struct dentry *parent,
+ struct proc_dir_entry *de)
+{
+ struct dentry *shadow = NULL;
+ struct inode *inode;
+ if (!de)
+ goto out;
+ de_get(de);
+ inode = proc_get_inode(parent->d_inode->i_sb, de->low_ino, de);
+ if (!inode)
+ goto out_de_put;
+ shadow = d_alloc_name(parent, de->name);
+ if (!shadow)
+ goto out_iput;
+ shadow->d_op = parent->d_op; /* proc_dentry_operations */
+ d_instantiate(shadow, inode);
+out:
+ return shadow;
+out_iput:
+ iput(inode);
+out_de_put:
+ de_put(de);
+ goto out;
+}
+
+static void *proc_net_follow_link(struct dentry *parent, struct nameidata *nd)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct dentry *shadow;
+ shadow = proc_net_shadow_dentry(parent, net->proc_net);
+ if (!shadow)
+ return ERR_PTR(-ENOENT);
+
+ dput(nd->dentry);
+ /* My dentry count is 1 and that should be enough as the
+ * shadow dentry is thrown away immediately.
+ */
+ nd->dentry = shadow;
+ return NULL;
+}
+
+static struct dentry *proc_net_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct dentry *shadow;
+
+ shadow = proc_net_shadow_dentry(nd->dentry, net->proc_net);
+ if (!shadow)
+ return ERR_PTR(-ENOENT);
+
+ dput(nd->dentry);
+ nd->dentry = shadow;
+
+ return shadow->d_inode->i_op->lookup(shadow->d_inode, dentry, nd);
+}
+
+static int proc_net_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct dentry *shadow;
+ int ret;
+
+ shadow = proc_net_shadow_dentry(dentry->d_parent, net->proc_net);
+ if (!shadow)
+ return -ENOENT;
+ ret = shadow->d_inode->i_op->setattr(shadow, iattr);
+ dput(shadow);
+ return ret;
+}
+
+static const struct file_operations proc_net_dir_operations = {
+ .read = generic_read_dir,
+};
+
+static struct inode_operations proc_net_dir_inode_operations = {
+ .follow_link = proc_net_follow_link,
+ .lookup = proc_net_lookup,
+ .setattr = proc_net_setattr,
+};
+
+static __net_init int proc_net_ns_init(struct net *net)
+{
+ struct proc_dir_entry *root, *netd, *net_statd;
+ int err;
+
+ err = -ENOMEM;
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ goto out;
+
+ err = -EEXIST;
+ netd = proc_mkdir("net", root);
+ if (!netd)
+ goto free_root;
+
+ err = -EEXIST;
+ net_statd = proc_mkdir("stat", netd);
+ if (!net_statd)
+ goto free_net;
+
+ root->data = net;
+ netd->data = net;
+ net_statd->data = net;
+
+ net->proc_net_root = root;
+ net->proc_net = netd;
+ net->proc_net_stat = net_statd;
+ err = 0;
+
+out:
+ return err;
+free_net:
+ remove_proc_entry("net", root);
+free_root:
+ kfree(root);
+ goto out;
+}
+
+static __net_exit void proc_net_ns_exit(struct net *net)
+{
+ remove_proc_entry("stat", net->proc_net);
+ remove_proc_entry("net", net->proc_net_root);
+ kfree(net->proc_net_root);
+}
+
+struct pernet_operations __net_initdata proc_net_ns_ops = {
+ .init = proc_net_ns_init,
+ .exit = proc_net_ns_exit,
+};
+
+int __init proc_net_init(void)
+{
+ proc_net_shadow = proc_mkdir("net", NULL);
+ proc_net_shadow->proc_iops = &proc_net_dir_inode_operations;
+ proc_net_shadow->proc_fops = &proc_net_dir_operations;
+
+ return register_pernet_subsys(&proc_net_ns_ops);
+}
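A hedged sketch of how a network subsystem might use the per-namespace helpers introduced above; the entry name "foo", foo_seq_fops and foo_net_ops are invented for illustration and are not part of this patch.

/* Illustrative only: create and tear down a per-namespace /proc/net/foo. */
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	/* proc_net_fops_create() returns NULL on failure */
	if (!proc_net_fops_create(net, "foo", S_IRUGO, &foo_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	proc_net_remove(net, "foo");
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

Registration would then go through register_pernet_subsys(&foo_net_ops), mirroring what proc_net_init() does for the proc_net_ns_ops above.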
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 41f1703..cf30466 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -21,7 +21,7 @@
#include "internal.h"
-struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc_root_driver;
+struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver;
static int proc_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
@@ -61,8 +61,8 @@ void __init proc_root_init(void)
return;
}
proc_misc_init();
- proc_net = proc_mkdir("net", NULL);
- proc_net_stat = proc_mkdir("net/stat", NULL);
+
+ proc_net_init();
#ifdef CONFIG_SYSVIPC
proc_mkdir("sysvipc", NULL);
@@ -159,7 +159,5 @@ EXPORT_SYMBOL(create_proc_entry);
EXPORT_SYMBOL(remove_proc_entry);
EXPORT_SYMBOL(proc_root);
EXPORT_SYMBOL(proc_root_fs);
-EXPORT_SYMBOL(proc_net);
-EXPORT_SYMBOL(proc_net_stat);
EXPORT_SYMBOL(proc_bus);
EXPORT_SYMBOL(proc_root_driver);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 8d256eb..1bc8d87 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -545,7 +545,7 @@ static int init_inodecache(void)
sizeof(struct qnx4_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (qnx4_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/quota.c b/fs/quota.c
index e6577ac..99b24b5 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -387,7 +387,7 @@ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t
return ret;
}
-#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
+#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
/*
* This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
* and is necessary due to alignment problems.
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index cad2b7a..237fe8b 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -295,5 +295,10 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
*/
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
- return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -ENOSYS;
+
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ return 0;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index b6f1259..981027d 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1042,7 +1042,8 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
pos = I_UNFM_NUM(&s_ih);
while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) {
- __u32 *unfm, block;
+ __le32 *unfm;
+ __u32 block;
/* Each unformatted block deletion may involve one additional
* bitmap block into the transaction, thereby the initial
@@ -1052,7 +1053,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
break;
}
- unfm = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
+ unfm = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
block = get_block_num(unfm, 0);
if (block != 0) {
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5a93cfe..a005451 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -527,7 +527,7 @@ static int init_inodecache(void)
reiserfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (reiserfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
@@ -1915,8 +1915,11 @@ static int reiserfs_release_dquot(struct dquot *dquot)
ret =
journal_begin(&th, dquot->dq_sb,
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
- if (ret)
+ if (ret) {
+ /* Release dquot anyway to avoid endless cycle in dqput() */
+ dquot_release(dquot);
goto out;
+ }
ret = dquot_release(dquot);
err =
journal_end(&th, dquot->dq_sb,
@@ -2067,6 +2070,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head tmp_bh, *bh;
+ if (!current->journal_info) {
+ printk(KERN_WARNING "reiserfs: Quota write (off=%Lu, len=%Lu)"
+ " cancelled because transaction is not started.\n",
+ (unsigned long long)off, (unsigned long long)len);
+ return -EIO;
+ }
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
@@ -2098,7 +2107,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
data += tocopy;
blk++;
}
- out:
+out:
if (len == towrite)
return err;
if (inode->i_size < off + len - towrite)
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 2284e03..dae7945 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -572,14 +572,14 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
romfs_inode_cachep = kmem_cache_create("romfs_inode_cache",
sizeof(struct romfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (romfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/select.c b/fs/select.c
index a974082..46dca31 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -26,8 +26,6 @@
#include <asm/uaccess.h>
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
-
struct poll_table_page {
struct poll_table_page * next;
struct poll_table_entry * entry;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index bbb19be..ca71c11 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -429,6 +429,39 @@ int seq_release_private(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(seq_release_private);
+void *__seq_open_private(struct file *f, const struct seq_operations *ops,
+ int psize)
+{
+ int rc;
+ void *private;
+ struct seq_file *seq;
+
+ private = kzalloc(psize, GFP_KERNEL);
+ if (private == NULL)
+ goto out;
+
+ rc = seq_open(f, ops);
+ if (rc < 0)
+ goto out_free;
+
+ seq = f->private_data;
+ seq->private = private;
+ return private;
+
+out_free:
+ kfree(private);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(__seq_open_private);
+
+int seq_open_private(struct file *filp, const struct seq_operations *ops,
+ int psize)
+{
+ return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(seq_open_private);
+
int seq_putc(struct seq_file *m, char c)
{
if (m->count < m->size) {
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 3b07f26..aefb0be 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -11,8 +11,10 @@
* Now using anonymous inode source.
* Thanks to Oleg Nesterov for useful code review and suggestions.
* More comments and suggestions from Arnd Bergmann.
- * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
+ * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
* Retrieve multiple signals with one read() call
+ * Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
+ * Attach to the sighand only during read() and poll().
*/
#include <linux/file.h>
@@ -27,96 +29,12 @@
#include <linux/signalfd.h>
struct signalfd_ctx {
- struct list_head lnk;
- wait_queue_head_t wqh;
sigset_t sigmask;
- struct task_struct *tsk;
};
-struct signalfd_lockctx {
- struct task_struct *tsk;
- unsigned long flags;
-};
-
-/*
- * Tries to acquire the sighand lock. We do not increment the sighand
- * use count, and we do not even pin the task struct, so we need to
- * do it inside an RCU read lock, and we must be prepared for the
- * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand
- * being detached. We return 0 if the sighand has been detached, or
- * 1 if we were able to pin the sighand lock.
- */
-static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk)
-{
- struct sighand_struct *sighand = NULL;
-
- rcu_read_lock();
- lk->tsk = rcu_dereference(ctx->tsk);
- if (likely(lk->tsk != NULL))
- sighand = lock_task_sighand(lk->tsk, &lk->flags);
- rcu_read_unlock();
-
- if (sighand && !ctx->tsk) {
- unlock_task_sighand(lk->tsk, &lk->flags);
- sighand = NULL;
- }
-
- return sighand != NULL;
-}
-
-static void signalfd_unlock(struct signalfd_lockctx *lk)
-{
- unlock_task_sighand(lk->tsk, &lk->flags);
-}
-
-/*
- * This must be called with the sighand lock held.
- */
-void signalfd_deliver(struct task_struct *tsk, int sig)
-{
- struct sighand_struct *sighand = tsk->sighand;
- struct signalfd_ctx *ctx, *tmp;
-
- BUG_ON(!sig);
- list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) {
- /*
- * We use a negative signal value as a way to broadcast that the
- * sighand has been orphaned, so that we can notify all the
- * listeners about this. Remember the ctx->sigmask is inverted,
- * so if the user is interested in a signal, that corresponding
- * bit will be zero.
- */
- if (sig < 0) {
- if (ctx->tsk == tsk) {
- ctx->tsk = NULL;
- list_del_init(&ctx->lnk);
- wake_up(&ctx->wqh);
- }
- } else {
- if (!sigismember(&ctx->sigmask, sig))
- wake_up(&ctx->wqh);
- }
- }
-}
-
-static void signalfd_cleanup(struct signalfd_ctx *ctx)
-{
- struct signalfd_lockctx lk;
-
- /*
- * This is tricky. If the sighand is gone, we do not need to remove
- * context from the list, the list itself won't be there anymore.
- */
- if (signalfd_lock(ctx, &lk)) {
- list_del(&ctx->lnk);
- signalfd_unlock(&lk);
- }
- kfree(ctx);
-}
-
static int signalfd_release(struct inode *inode, struct file *file)
{
- signalfd_cleanup(file->private_data);
+ kfree(file->private_data);
return 0;
}
@@ -124,23 +42,15 @@ static unsigned int signalfd_poll(struct file *file, poll_table *wait)
{
struct signalfd_ctx *ctx = file->private_data;
unsigned int events = 0;
- struct signalfd_lockctx lk;
- poll_wait(file, &ctx->wqh, wait);
+ poll_wait(file, &current->sighand->signalfd_wqh, wait);
- /*
- * Let the caller get a POLLIN in this case, ala socket recv() when
- * the peer disconnects.
- */
- if (signalfd_lock(ctx, &lk)) {
- if ((lk.tsk == current &&
- next_signal(&lk.tsk->pending, &ctx->sigmask) > 0) ||
- next_signal(&lk.tsk->signal->shared_pending,
- &ctx->sigmask) > 0)
- events |= POLLIN;
- signalfd_unlock(&lk);
- } else
+ spin_lock_irq(&current->sighand->siglock);
+ if (next_signal(&current->pending, &ctx->sigmask) ||
+ next_signal(&current->signal->shared_pending,
+ &ctx->sigmask))
events |= POLLIN;
+ spin_unlock_irq(&current->sighand->siglock);
return events;
}
@@ -213,59 +123,46 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
int nonblock)
{
ssize_t ret;
- struct signalfd_lockctx lk;
DECLARE_WAITQUEUE(wait, current);
- if (!signalfd_lock(ctx, &lk))
- return 0;
-
- ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
+ spin_lock_irq(&current->sighand->siglock);
+ ret = dequeue_signal(current, &ctx->sigmask, info);
switch (ret) {
case 0:
if (!nonblock)
break;
ret = -EAGAIN;
default:
- signalfd_unlock(&lk);
+ spin_unlock_irq(&current->sighand->siglock);
return ret;
}
- add_wait_queue(&ctx->wqh, &wait);
+ add_wait_queue(&current->sighand->signalfd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
- signalfd_unlock(&lk);
+ ret = dequeue_signal(current, &ctx->sigmask, info);
if (ret != 0)
break;
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
+ spin_unlock_irq(&current->sighand->siglock);
schedule();
- ret = signalfd_lock(ctx, &lk);
- if (unlikely(!ret)) {
- /*
- * Let the caller read zero byte, ala socket
- * recv() when the peer disconnect. This test
- * must be done before doing a dequeue_signal(),
- * because if the sighand has been orphaned,
- * the dequeue_signal() call is going to crash
- * because ->sighand will be long gone.
- */
- break;
- }
+ spin_lock_irq(&current->sighand->siglock);
}
+ spin_unlock_irq(&current->sighand->siglock);
- remove_wait_queue(&ctx->wqh, &wait);
+ remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
__set_current_state(TASK_RUNNING);
return ret;
}
/*
- * Returns either the size of a "struct signalfd_siginfo", or zero if the
- * sighand we are attached to, has been orphaned. The "count" parameter
- * must be at least the size of a "struct signalfd_siginfo".
+ * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
+ * error code. The "count" parameter must be at least the size of a
+ * "struct signalfd_siginfo".
*/
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
@@ -281,7 +178,6 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
return -EINVAL;
siginfo = (struct signalfd_siginfo __user *) buf;
-
do {
ret = signalfd_dequeue(ctx, &info, nonblock);
if (unlikely(ret <= 0))
@@ -294,7 +190,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
nonblock = 1;
} while (--count);
- return total ? total : ret;
+ return total ? total: ret;
}
static const struct file_operations signalfd_fops = {
@@ -303,24 +199,17 @@ static const struct file_operations signalfd_fops = {
.read = signalfd_read,
};
-/*
- * Create a file descriptor that is associated with our signal
- * state. We can pass it around to others if we want to, but
- * it will always be _our_ signal state.
- */
asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask)
{
int error;
sigset_t sigmask;
struct signalfd_ctx *ctx;
- struct sighand_struct *sighand;
struct file *file;
struct inode *inode;
- struct signalfd_lockctx lk;
if (sizemask != sizeof(sigset_t) ||
copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
- return error = -EINVAL;
+ return -EINVAL;
sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
signotset(&sigmask);
@@ -329,17 +218,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
if (!ctx)
return -ENOMEM;
- init_waitqueue_head(&ctx->wqh);
ctx->sigmask = sigmask;
- ctx->tsk = current;
-
- sighand = current->sighand;
- /*
- * Add this fd to the list of signal listeners.
- */
- spin_lock_irq(&sighand->siglock);
- list_add_tail(&ctx->lnk, &sighand->signalfd_list);
- spin_unlock_irq(&sighand->siglock);
/*
* When we call this, the initialization must be complete, since
@@ -358,23 +237,18 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
fput(file);
return -EINVAL;
}
- /*
- * We need to be prepared of the fact that the sighand this fd
- * is attached to, has been detched. In that case signalfd_lock()
- * will return 0, and we'll just skip setting the new mask.
- */
- if (signalfd_lock(ctx, &lk)) {
- ctx->sigmask = sigmask;
- signalfd_unlock(&lk);
- }
- wake_up(&ctx->wqh);
+ spin_lock_irq(&current->sighand->siglock);
+ ctx->sigmask = sigmask;
+ spin_unlock_irq(&current->sighand->siglock);
+
+ wake_up(&current->sighand->signalfd_wqh);
fput(file);
}
return ufd;
err_fdalloc:
- signalfd_cleanup(ctx);
+ kfree(ctx);
return error;
}
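The rework above means read() and poll() now always operate on the calling task's own pending queues rather than on the creator's sighand. A hedged userspace sketch of the interface, assuming the later glibc signalfd(2) wrapper and <sys/signalfd.h> (not part of this patch):

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo ssi;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* block normal delivery so the signal stays queued for the fd */
	sigprocmask(SIG_BLOCK, &mask, NULL);

	fd = signalfd(-1, &mask, 0);
	if (fd < 0)
		return 1;
	if (read(fd, &ssi, sizeof(ssi)) == (ssize_t)sizeof(ssi))
		printf("caught signal %u\n", ssi.ssi_signo);
	close(fd);
	return 0;
}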
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 6724a6c..73d1450 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -73,14 +73,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
smb_inode_cachep = kmem_cache_create("smb_inode_cache",
sizeof(struct smb_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (smb_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 3f54a0f..ca4b2d5 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -40,7 +40,7 @@ int smb_init_request_cache(void)
req_cachep = kmem_cache_create("smb_request",
sizeof(struct smb_request), 0,
SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (req_cachep == NULL)
return -ENOMEM;
diff --git a/fs/splice.c b/fs/splice.c
index 53fc208..e95a362 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -164,7 +164,7 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
* @spd: data to fill
*
* Description:
- * @spd contains a map of pages and len/offset tupples, a long with
+ * @spd contains a map of pages and len/offset tuples, along with
* the struct pipe_buf_operations associated with these pages. This
* function will link that data to the pipe.
*
@@ -265,7 +265,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
- unsigned int loff, nr_pages;
+ unsigned int loff, nr_pages, req_pages;
struct page *pages[PIPE_BUFFERS];
struct partial_page partial[PIPE_BUFFERS];
struct page *page;
@@ -281,28 +281,24 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
- nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
- if (nr_pages > PIPE_BUFFERS)
- nr_pages = PIPE_BUFFERS;
-
- /*
- * Don't try to 2nd guess the read-ahead logic, call into
- * page_cache_readahead() like the page cache reads would do.
- */
- page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
+ req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
/*
* Lookup the (hopefully) full range of pages we need.
*/
spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
+ index += spd.nr_pages;
/*
* If find_get_pages_contig() returned fewer pages than we needed,
- * allocate the rest and fill in the holes.
+ * readahead/allocate the rest and fill in the holes.
*/
+ if (spd.nr_pages < nr_pages)
+ page_cache_sync_readahead(mapping, &in->f_ra, in,
+ index, req_pages - spd.nr_pages);
+
error = 0;
- index += spd.nr_pages;
while (spd.nr_pages < nr_pages) {
/*
* Page could be there, find_get_pages_contig() breaks on
@@ -311,12 +307,6 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
page = find_get_page(mapping, index);
if (!page) {
/*
- * Make sure the read-ahead engine is notified
- * about this failure.
- */
- handle_ra_miss(mapping, &in->f_ra, index);
-
- /*
* page didn't exist, allocate one.
*/
page = page_cache_alloc_cold(mapping);
@@ -361,6 +351,10 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
page = pages[page_nr];
+ if (PageReadahead(page))
+ page_cache_async_readahead(mapping, &in->f_ra, in,
+ page, index, req_pages - page_nr);
+
/*
* If the page isn't uptodate, we may need to start io on it
*/
@@ -453,6 +447,7 @@ fill_it:
*/
while (page_nr < nr_pages)
page_cache_release(pages[page_nr++]);
+ in->f_ra.prev_index = index;
if (spd.nr_pages)
return splice_to_pipe(pipe, &spd);
@@ -599,7 +594,7 @@ find_page:
ret = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL);
if (unlikely(ret))
- goto out;
+ goto out_release;
}
ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
@@ -655,8 +650,9 @@ find_page:
*/
mark_page_accessed(page);
out:
- page_cache_release(page);
unlock_page(page);
+out_release:
+ page_cache_release(page);
out_ret:
return ret;
}
@@ -1004,7 +1000,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
* Description:
* This is a special case helper to splice directly between two
* points, without requiring an explicit pipe. Internally an allocated
- * pipe is cached in the process, and reused during the life time of
+ * pipe is cached in the process, and reused during the lifetime of
* that process.
*
*/
@@ -1228,6 +1224,33 @@ static long do_splice(struct file *in, loff_t __user *off_in,
}
/*
+ * Do a copy-from-user while holding the mmap_semaphore for reading, in a
+ * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
+ * for writing) and page faulting on the user memory pointed to by src.
+ * This assumes that we will very rarely hit the partial != 0 path, or this
+ * will not be a win.
+ */
+static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
+{
+ int partial;
+
+ pagefault_disable();
+ partial = __copy_from_user_inatomic(dst, src, n);
+ pagefault_enable();
+
+ /*
+ * Didn't copy everything, drop the mmap_sem and do a faulting copy
+ */
+ if (unlikely(partial)) {
+ up_read(&current->mm->mmap_sem);
+ partial = copy_from_user(dst, src, n);
+ down_read(&current->mm->mmap_sem);
+ }
+
+ return partial;
+}
+
+/*
* Map an iov into an array of pages and offset/length tupples. With the
* partial_page structure, we can map several non-contiguous ranges into
* our ones pages[] map instead of splitting that operation into pieces.
@@ -1240,31 +1263,26 @@ static int get_iovec_page_array(const struct iovec __user *iov,
{
int buffers = 0, error = 0;
- /*
- * It's ok to take the mmap_sem for reading, even
- * across a "get_user()".
- */
down_read(&current->mm->mmap_sem);
while (nr_vecs) {
unsigned long off, npages;
+ struct iovec entry;
void __user *base;
size_t len;
int i;
- /*
- * Get user address base and length for this iovec.
- */
- error = get_user(base, &iov->iov_base);
- if (unlikely(error))
- break;
- error = get_user(len, &iov->iov_len);
- if (unlikely(error))
+ error = -EFAULT;
+ if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
break;
+ base = entry.iov_base;
+ len = entry.iov_len;
+
/*
* Sanity check this iovec. 0 read succeeds.
*/
+ error = 0;
if (unlikely(!len))
break;
error = -EFAULT;
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 135353f..006fc64 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -1,9 +1,15 @@
/*
- * bin.c - binary file operations for sysfs.
+ * fs/sysfs/bin.c - sysfs binary file implementation
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Matthew Wilcox
* Copyright (c) 2004 Silicon Graphics, Inc.
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#undef DEBUG
@@ -14,9 +20,9 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
@@ -30,8 +36,8 @@ static int
fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
/* need attr_sd for attr, its parent for kobj */
@@ -87,8 +93,8 @@ static int
flush_write(struct dentry *dentry, char *buffer, loff_t offset, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
/* need attr_sd for attr, its parent for kobj */
@@ -140,8 +146,8 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
{
struct bin_buffer *bb = file->private_data;
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
int rc;
mutex_lock(&bb->mutex);
@@ -167,12 +173,12 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
static int open(struct inode * inode, struct file * file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct bin_attribute *attr = attr_sd->s_elem.bin_attr.bin_attr;
+ struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
struct bin_buffer *bb = NULL;
int error;
- /* need attr_sd for attr */
- if (!sysfs_get_active(attr_sd))
+ /* binary file operations require both @sd and its parent */
+ if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
error = -EACCES;
@@ -193,13 +199,12 @@ static int open(struct inode * inode, struct file * file)
mutex_init(&bb->mutex);
file->private_data = bb;
- /* open succeeded, put active reference and pin attr_sd */
- sysfs_put_active(attr_sd);
- sysfs_get(attr_sd);
+ /* open succeeded, put active references */
+ sysfs_put_active_two(attr_sd);
return 0;
err_out:
- sysfs_put_active(attr_sd);
+ sysfs_put_active_two(attr_sd);
kfree(bb);
return error;
}
@@ -211,7 +216,6 @@ static int release(struct inode * inode, struct file * file)
if (bb->mmapped)
sysfs_put_active_two(attr_sd);
- sysfs_put(attr_sd);
kfree(bb->buffer);
kfree(bb);
return 0;
@@ -248,12 +252,7 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
{
- if (sysfs_hash_and_remove(kobj->sd, attr->attr.name) < 0) {
- printk(KERN_ERR "%s: "
- "bad dentry or inode or no such file: \"%s\"\n",
- __FUNCTION__, attr->attr.name);
- dump_stack();
- }
+ sysfs_hash_and_remove(kobj->sd, attr->attr.name);
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index aee966c..9161db4 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1,5 +1,13 @@
/*
- * dir.c - Operations for sysfs directories.
+ * fs/sysfs/dir.c - sysfs core and dir operation implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#undef DEBUG
@@ -11,10 +19,11 @@
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/completion.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex);
+DEFINE_MUTEX(sysfs_rename_mutex);
spinlock_t sysfs_assoc_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t sysfs_ino_lock = SPIN_LOCK_UNLOCKED;
@@ -25,18 +34,28 @@ static DEFINE_IDA(sysfs_ino_ida);
* @sd: sysfs_dirent of interest
*
* Link @sd into its sibling list which starts from
- * sd->s_parent->s_children.
+ * sd->s_parent->s_dir.children.
*
* Locking:
* mutex_lock(sysfs_mutex)
*/
-void sysfs_link_sibling(struct sysfs_dirent *sd)
+static void sysfs_link_sibling(struct sysfs_dirent *sd)
{
struct sysfs_dirent *parent_sd = sd->s_parent;
+ struct sysfs_dirent **pos;
BUG_ON(sd->s_sibling);
- sd->s_sibling = parent_sd->s_children;
- parent_sd->s_children = sd;
+
+ /* Store directory entries in order by ino. This allows
+ * readdir to properly restart without having to add a
+ * cursor into the s_dir.children list.
+ */
+ for (pos = &parent_sd->s_dir.children; *pos; pos = &(*pos)->s_sibling) {
+ if (sd->s_ino < (*pos)->s_ino)
+ break;
+ }
+ sd->s_sibling = *pos;
+ *pos = sd;
}
/**
@@ -44,16 +63,17 @@ void sysfs_link_sibling(struct sysfs_dirent *sd)
* @sd: sysfs_dirent of interest
*
* Unlink @sd from its sibling list which starts from
- * sd->s_parent->s_children.
+ * sd->s_parent->s_dir.children.
*
* Locking:
* mutex_lock(sysfs_mutex)
*/
-void sysfs_unlink_sibling(struct sysfs_dirent *sd)
+static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
{
struct sysfs_dirent **pos;
- for (pos = &sd->s_parent->s_children; *pos; pos = &(*pos)->s_sibling) {
+ for (pos = &sd->s_parent->s_dir.children; *pos;
+ pos = &(*pos)->s_sibling) {
if (*pos == sd) {
*pos = sd->s_sibling;
sd->s_sibling = NULL;
@@ -67,96 +87,39 @@ void sysfs_unlink_sibling(struct sysfs_dirent *sd)
* @sd: sysfs_dirent of interest
*
* Get dentry for @sd. Dentry is looked up if currently not
- * present. This function climbs sysfs_dirent tree till it
- * reaches a sysfs_dirent with valid dentry attached and descends
- * down from there looking up dentry for each step.
+ * present. This function descends from the root looking up
+ * dentry for each step.
*
* LOCKING:
- * Kernel thread context (may sleep)
+ * mutex_lock(sysfs_rename_mutex)
*
* RETURNS:
* Pointer to found dentry on success, ERR_PTR() value on error.
*/
struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd)
{
- struct sysfs_dirent *cur;
- struct dentry *parent_dentry, *dentry;
- int i, depth;
+ struct dentry *dentry = dget(sysfs_sb->s_root);
- /* Find the first parent which has valid s_dentry and get the
- * dentry.
- */
- mutex_lock(&sysfs_mutex);
- restart0:
- spin_lock(&sysfs_assoc_lock);
- restart1:
- spin_lock(&dcache_lock);
-
- dentry = NULL;
- depth = 0;
- cur = sd;
- while (!cur->s_dentry || !cur->s_dentry->d_inode) {
- if (cur->s_flags & SYSFS_FLAG_REMOVED) {
- dentry = ERR_PTR(-ENOENT);
- depth = 0;
- break;
- }
- cur = cur->s_parent;
- depth++;
- }
- if (!IS_ERR(dentry))
- dentry = dget_locked(cur->s_dentry);
-
- spin_unlock(&dcache_lock);
- spin_unlock(&sysfs_assoc_lock);
+ while (dentry->d_fsdata != sd) {
+ struct sysfs_dirent *cur;
+ struct dentry *parent;
- /* from the found dentry, look up depth times */
- while (depth--) {
- /* find and get depth'th ancestor */
- for (cur = sd, i = 0; cur && i < depth; i++)
+ /* find the first ancestor which hasn't been looked up */
+ cur = sd;
+ while (cur->s_parent != dentry->d_fsdata)
cur = cur->s_parent;
- /* This can happen if tree structure was modified due
- * to move/rename. Restart.
- */
- if (i != depth) {
- dput(dentry);
- goto restart0;
- }
-
- sysfs_get(cur);
-
- mutex_unlock(&sysfs_mutex);
-
/* look it up */
- parent_dentry = dentry;
- dentry = lookup_one_len_kern(cur->s_name, parent_dentry,
+ parent = dentry;
+ mutex_lock(&parent->d_inode->i_mutex);
+ dentry = lookup_one_len_kern(cur->s_name, parent,
strlen(cur->s_name));
- dput(parent_dentry);
-
- if (IS_ERR(dentry)) {
- sysfs_put(cur);
- return dentry;
- }
-
- mutex_lock(&sysfs_mutex);
- spin_lock(&sysfs_assoc_lock);
-
- /* This, again, can happen if tree structure has
- * changed and we looked up the wrong thing. Restart.
- */
- if (cur->s_dentry != dentry) {
- dput(dentry);
- sysfs_put(cur);
- goto restart1;
- }
-
- spin_unlock(&sysfs_assoc_lock);
+ mutex_unlock(&parent->d_inode->i_mutex);
+ dput(parent);
- sysfs_put(cur);
+ if (IS_ERR(dentry))
+ break;
}
-
- mutex_unlock(&sysfs_mutex);
return dentry;
}
@@ -319,7 +282,7 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
parent_sd = sd->s_parent;
if (sysfs_type(sd) == SYSFS_KOBJ_LINK)
- sysfs_put(sd->s_elem.symlink.target_sd);
+ sysfs_put(sd->s_symlink.target_sd);
if (sysfs_type(sd) & SYSFS_COPY_NAME)
kfree(sd->s_name);
kfree(sd->s_iattr);
@@ -335,22 +298,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
{
struct sysfs_dirent * sd = dentry->d_fsdata;
- if (sd) {
- /* sd->s_dentry is protected with sysfs_assoc_lock.
- * This allows sysfs_drop_dentry() to dereference it.
- */
- spin_lock(&sysfs_assoc_lock);
-
- /* The dentry might have been deleted or another
- * lookup could have happened updating sd->s_dentry to
- * point the new dentry. Ignore if it isn't pointing
- * to this dentry.
- */
- if (sd->s_dentry == dentry)
- sd->s_dentry = NULL;
- spin_unlock(&sysfs_assoc_lock);
- sysfs_put(sd);
- }
+ sysfs_put(sd);
iput(inode);
}
@@ -361,24 +309,23 @@ static struct dentry_operations sysfs_dentry_ops = {
struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
{
char *dup_name = NULL;
- struct sysfs_dirent *sd = NULL;
+ struct sysfs_dirent *sd;
if (type & SYSFS_COPY_NAME) {
name = dup_name = kstrdup(name, GFP_KERNEL);
if (!name)
- goto err_out;
+ return NULL;
}
sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
if (!sd)
- goto err_out;
+ goto err_out1;
if (sysfs_alloc_ino(&sd->s_ino))
- goto err_out;
+ goto err_out2;
atomic_set(&sd->s_count, 1);
atomic_set(&sd->s_active, 0);
- atomic_set(&sd->s_event, 1);
sd->s_name = name;
sd->s_mode = mode;
@@ -386,36 +333,13 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
return sd;
- err_out:
- kfree(dup_name);
+ err_out2:
kmem_cache_free(sysfs_dir_cachep, sd);
+ err_out1:
+ kfree(dup_name);
return NULL;
}
-/**
- * sysfs_attach_dentry - associate sysfs_dirent with dentry
- * @sd: target sysfs_dirent
- * @dentry: dentry to associate
- *
- * Associate @sd with @dentry. This is protected by
- * sysfs_assoc_lock to avoid race with sysfs_d_iput().
- *
- * LOCKING:
- * mutex_lock(sysfs_mutex)
- */
-static void sysfs_attach_dentry(struct sysfs_dirent *sd, struct dentry *dentry)
-{
- dentry->d_op = &sysfs_dentry_ops;
- dentry->d_fsdata = sysfs_get(sd);
-
- /* protect sd->s_dentry against sysfs_d_iput */
- spin_lock(&sysfs_assoc_lock);
- sd->s_dentry = dentry;
- spin_unlock(&sysfs_assoc_lock);
-
- d_rehash(dentry);
-}
-
static int sysfs_ilookup_test(struct inode *inode, void *arg)
{
struct sysfs_dirent *sd = arg;
@@ -479,10 +403,8 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
* @sd: sysfs_dirent to be added
*
* Get @acxt->parent_sd and set sd->s_parent to it and increment
- * nlink of parent inode if @sd is a directory. @sd is NOT
- * linked into the children list of the parent. The caller
- * should invoke sysfs_link_sibling() after this function
- * completes if @sd needs to be on the children list.
+ * nlink of parent inode if @sd is a directory and link into the
+ * children list of the parent.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -490,15 +412,30 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
*
* LOCKING:
* Determined by sysfs_addrm_start().
+ *
+ * RETURNS:
+ * 0 on success, -EEXIST if entry with the given name already
+ * exists.
*/
-void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
+ if (sysfs_find_dirent(acxt->parent_sd, sd->s_name)) {
+ printk(KERN_WARNING "sysfs: duplicate filename '%s' "
+ "can not be created\n", sd->s_name);
+ WARN_ON(1);
+ return -EEXIST;
+ }
+
sd->s_parent = sysfs_get(acxt->parent_sd);
if (sysfs_type(sd) == SYSFS_DIR && acxt->parent_inode)
inc_nlink(acxt->parent_inode);
acxt->cnt++;
+
+ sysfs_link_sibling(sd);
+
+ return 0;
}
/**
@@ -507,9 +444,7 @@ void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* @sd: sysfs_dirent to be added
*
* Mark @sd removed and drop nlink of parent inode if @sd is a
- * directory. @sd is NOT unlinked from the children list of the
- * parent. The caller is repsonsible for removing @sd from the
- * children list before calling this function.
+ * directory. @sd is unlinked from the children list.
*
* This function should be called between calls to
* sysfs_addrm_start() and sysfs_addrm_finish() and should be
@@ -520,7 +455,9 @@ void sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
*/
void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
- BUG_ON(sd->s_sibling || (sd->s_flags & SYSFS_FLAG_REMOVED));
+ BUG_ON(sd->s_flags & SYSFS_FLAG_REMOVED);
+
+ sysfs_unlink_sibling(sd);
sd->s_flags |= SYSFS_FLAG_REMOVED;
sd->s_sibling = acxt->removed;
@@ -539,53 +476,49 @@ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
* Drop dentry for @sd. @sd must have been unlinked from its
* parent on entry to this function such that it can't be looked
* up anymore.
- *
- * @sd->s_dentry which is protected with sysfs_assoc_lock points
- * to the currently associated dentry but we're not holding a
- * reference to it and racing with dput(). Grab dcache_lock and
- * verify dentry before dropping it. If @sd->s_dentry is NULL or
- * dput() beats us, no need to bother.
*/
static void sysfs_drop_dentry(struct sysfs_dirent *sd)
{
- struct dentry *dentry = NULL;
struct inode *inode;
+ struct dentry *dentry;
+
+ inode = ilookup(sysfs_sb, sd->s_ino);
+ if (!inode)
+ return;
- /* We're not holding a reference to ->s_dentry dentry but the
- * field will stay valid as long as sysfs_assoc_lock is held.
+ /* Drop any existing dentries associated with sd.
+ *
+ * For the dentry to be properly freed we need to grab a
+ * reference to the dentry under the dcache lock, unhash it,
+ * and then put it. The playing with the dentry count allows
+ * dput to immediately free the dentry if it is not in use.
*/
- spin_lock(&sysfs_assoc_lock);
+repeat:
spin_lock(&dcache_lock);
-
- /* drop dentry if it's there and dput() didn't kill it yet */
- if (sd->s_dentry && sd->s_dentry->d_inode) {
- dentry = dget_locked(sd->s_dentry);
+ list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ if (d_unhashed(dentry))
+ continue;
+ dget_locked(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+ dput(dentry);
+ goto repeat;
}
-
spin_unlock(&dcache_lock);
- spin_unlock(&sysfs_assoc_lock);
-
- /* dentries for shadowed inodes are pinned, unpin */
- if (dentry && sysfs_is_shadowed_inode(dentry->d_inode))
- dput(dentry);
- dput(dentry);
/* adjust nlink and update timestamp */
- inode = ilookup(sysfs_sb, sd->s_ino);
- if (inode) {
- mutex_lock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
- inode->i_ctime = CURRENT_TIME;
+ inode->i_ctime = CURRENT_TIME;
+ drop_nlink(inode);
+ if (sysfs_type(sd) == SYSFS_DIR)
drop_nlink(inode);
- if (sysfs_type(sd) == SYSFS_DIR)
- drop_nlink(inode);
- mutex_unlock(&inode->i_mutex);
- iput(inode);
- }
+ mutex_unlock(&inode->i_mutex);
+
+ iput(inode);
}
/**
@@ -598,11 +531,8 @@ static void sysfs_drop_dentry(struct sysfs_dirent *sd)
*
* LOCKING:
* All mutexes acquired by sysfs_addrm_start() are released.
- *
- * RETURNS:
- * Number of added/removed sysfs_dirents since sysfs_addrm_start().
*/
-int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
+void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
{
/* release resources acquired by sysfs_addrm_start() */
mutex_unlock(&sysfs_mutex);
@@ -628,8 +558,6 @@ int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
sysfs_deactivate(sd);
sysfs_put(sd);
}
-
- return acxt->cnt;
}
/**
@@ -650,8 +578,8 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
{
struct sysfs_dirent *sd;
- for (sd = parent_sd->s_children; sd; sd = sd->s_sibling)
- if (sysfs_type(sd) && !strcmp(sd->s_name, name))
+ for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling)
+ if (!strcmp(sd->s_name, name))
return sd;
return NULL;
}
@@ -689,26 +617,25 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
+ int rc;
/* allocate */
sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
if (!sd)
return -ENOMEM;
- sd->s_elem.dir.kobj = kobj;
+ sd->s_dir.kobj = kobj;
/* link in */
sysfs_addrm_start(&acxt, parent_sd);
- if (!sysfs_find_dirent(parent_sd, name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
- if (sysfs_addrm_finish(&acxt)) {
+ rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
+
+ if (rc == 0)
*p_sd = sd;
- return 0;
- }
+ else
+ sysfs_put(sd);
- sysfs_put(sd);
- return -EEXIST;
+ return rc;
}
int sysfs_create_subdir(struct kobject *kobj, const char *name,
@@ -720,24 +647,18 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
/**
* sysfs_create_dir - create a directory for an object.
* @kobj: object we're creating directory for.
- * @shadow_parent: parent object.
*/
-int sysfs_create_dir(struct kobject *kobj,
- struct sysfs_dirent *shadow_parent_sd)
+int sysfs_create_dir(struct kobject * kobj)
{
struct sysfs_dirent *parent_sd, *sd;
int error = 0;
BUG_ON(!kobj);
- if (shadow_parent_sd)
- parent_sd = shadow_parent_sd;
- else if (kobj->parent)
+ if (kobj->parent)
parent_sd = kobj->parent->sd;
- else if (sysfs_mount && sysfs_mount->mnt_sb)
- parent_sd = sysfs_mount->mnt_sb->s_root->d_fsdata;
else
- return -EFAULT;
+ parent_sd = &sysfs_root;
error = create_dir(kobj, parent_sd, kobject_name(kobj), &sd);
if (!error)
@@ -745,76 +666,38 @@ int sysfs_create_dir(struct kobject *kobj,
return error;
}
-static int sysfs_count_nlink(struct sysfs_dirent *sd)
-{
- struct sysfs_dirent *child;
- int nr = 0;
-
- for (child = sd->s_children; child; child = child->s_sibling)
- if (sysfs_type(child) == SYSFS_DIR)
- nr++;
- return nr + 2;
-}
-
static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
- struct sysfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
- struct sysfs_dirent * sd;
- struct bin_attribute *bin_attr;
+ struct dentry *ret = NULL;
+ struct sysfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
+ struct sysfs_dirent *sd;
struct inode *inode;
- int found = 0;
- for (sd = parent_sd->s_children; sd; sd = sd->s_sibling) {
- if (sysfs_type(sd) &&
- !strcmp(sd->s_name, dentry->d_name.name)) {
- found = 1;
- break;
- }
- }
+ mutex_lock(&sysfs_mutex);
+
+ sd = sysfs_find_dirent(parent_sd, dentry->d_name.name);
/* no such entry */
- if (!found)
- return NULL;
+ if (!sd)
+ goto out_unlock;
/* attach dentry and inode */
inode = sysfs_get_inode(sd);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&sysfs_mutex);
-
- if (inode->i_state & I_NEW) {
- /* initialize inode according to type */
- switch (sysfs_type(sd)) {
- case SYSFS_DIR:
- inode->i_op = &sysfs_dir_inode_operations;
- inode->i_fop = &sysfs_dir_operations;
- inode->i_nlink = sysfs_count_nlink(sd);
- break;
- case SYSFS_KOBJ_ATTR:
- inode->i_size = PAGE_SIZE;
- inode->i_fop = &sysfs_file_operations;
- break;
- case SYSFS_KOBJ_BIN_ATTR:
- bin_attr = sd->s_elem.bin_attr.bin_attr;
- inode->i_size = bin_attr->size;
- inode->i_fop = &bin_fops;
- break;
- case SYSFS_KOBJ_LINK:
- inode->i_op = &sysfs_symlink_inode_operations;
- break;
- default:
- BUG();
- }
+ if (!inode) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_unlock;
}
- sysfs_instantiate(dentry, inode);
- sysfs_attach_dentry(sd, dentry);
+ /* instantiate and hash dentry */
+ dentry->d_op = &sysfs_dentry_ops;
+ dentry->d_fsdata = sysfs_get(sd);
+ d_instantiate(dentry, inode);
+ d_rehash(dentry);
+ out_unlock:
mutex_unlock(&sysfs_mutex);
-
- return NULL;
+ return ret;
}
const struct inode_operations sysfs_dir_inode_operations = {
@@ -827,7 +710,6 @@ static void remove_dir(struct sysfs_dirent *sd)
struct sysfs_addrm_cxt acxt;
sysfs_addrm_start(&acxt, sd->s_parent);
- sysfs_unlink_sibling(sd);
sysfs_remove_one(&acxt, sd);
sysfs_addrm_finish(&acxt);
}
@@ -848,15 +730,13 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
pr_debug("sysfs %s: removing dir\n", dir_sd->s_name);
sysfs_addrm_start(&acxt, dir_sd);
- pos = &dir_sd->s_children;
+ pos = &dir_sd->s_dir.children;
while (*pos) {
struct sysfs_dirent *sd = *pos;
- if (sysfs_type(sd) && sysfs_type(sd) != SYSFS_DIR) {
- *pos = sd->s_sibling;
- sd->s_sibling = NULL;
+ if (sysfs_type(sd) != SYSFS_DIR)
sysfs_remove_one(&acxt, sd);
- } else
+ else
pos = &(*pos)->s_sibling;
}
sysfs_addrm_finish(&acxt);
@@ -884,90 +764,68 @@ void sysfs_remove_dir(struct kobject * kobj)
__sysfs_remove_dir(sd);
}
-int sysfs_rename_dir(struct kobject *kobj, struct sysfs_dirent *new_parent_sd,
- const char *new_name)
+int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
{
struct sysfs_dirent *sd = kobj->sd;
- struct dentry *new_parent = NULL;
+ struct dentry *parent = NULL;
struct dentry *old_dentry = NULL, *new_dentry = NULL;
const char *dup_name = NULL;
int error;
- /* get dentries */
+ mutex_lock(&sysfs_rename_mutex);
+
+ error = 0;
+ if (strcmp(sd->s_name, new_name) == 0)
+ goto out; /* nothing to rename */
+
+ /* get the original dentry */
old_dentry = sysfs_get_dentry(sd);
if (IS_ERR(old_dentry)) {
error = PTR_ERR(old_dentry);
- goto out_dput;
- }
-
- new_parent = sysfs_get_dentry(new_parent_sd);
- if (IS_ERR(new_parent)) {
- error = PTR_ERR(new_parent);
- goto out_dput;
+ goto out;
}
- /* lock new_parent and get dentry for new name */
- mutex_lock(&new_parent->d_inode->i_mutex);
+ parent = old_dentry->d_parent;
- new_dentry = lookup_one_len(new_name, new_parent, strlen(new_name));
- if (IS_ERR(new_dentry)) {
- error = PTR_ERR(new_dentry);
- goto out_unlock;
- }
+ /* lock parent and get dentry for new name */
+ mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&sysfs_mutex);
- /* By allowing two different directories with the same
- * d_parent we allow this routine to move between different
- * shadows of the same directory
- */
- error = -EINVAL;
- if (old_dentry->d_parent->d_inode != new_parent->d_inode ||
- new_dentry->d_parent->d_inode != new_parent->d_inode ||
- old_dentry == new_dentry)
+ error = -EEXIST;
+ if (sysfs_find_dirent(sd->s_parent, new_name))
goto out_unlock;
- error = -EEXIST;
- if (new_dentry->d_inode)
+ error = -ENOMEM;
+ new_dentry = d_alloc_name(parent, new_name);
+ if (!new_dentry)
goto out_unlock;
/* rename kobject and sysfs_dirent */
error = -ENOMEM;
new_name = dup_name = kstrdup(new_name, GFP_KERNEL);
if (!new_name)
- goto out_drop;
+ goto out_unlock;
error = kobject_set_name(kobj, "%s", new_name);
if (error)
- goto out_drop;
+ goto out_unlock;
dup_name = sd->s_name;
sd->s_name = new_name;
- /* move under the new parent */
+ /* rename */
d_add(new_dentry, NULL);
- d_move(sd->s_dentry, new_dentry);
-
- mutex_lock(&sysfs_mutex);
-
- sysfs_unlink_sibling(sd);
- sysfs_get(new_parent_sd);
- sysfs_put(sd->s_parent);
- sd->s_parent = new_parent_sd;
- sysfs_link_sibling(sd);
-
- mutex_unlock(&sysfs_mutex);
+ d_move(old_dentry, new_dentry);
error = 0;
- goto out_unlock;
-
- out_drop:
- d_drop(new_dentry);
out_unlock:
- mutex_unlock(&new_parent->d_inode->i_mutex);
- out_dput:
+ mutex_unlock(&sysfs_mutex);
+ mutex_unlock(&parent->d_inode->i_mutex);
kfree(dup_name);
- dput(new_parent);
dput(old_dentry);
dput(new_dentry);
+ out:
+ mutex_unlock(&sysfs_rename_mutex);
return error;
}
@@ -979,96 +837,69 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
struct dentry *old_dentry = NULL, *new_dentry = NULL;
int error;
+ mutex_lock(&sysfs_rename_mutex);
BUG_ON(!sd->s_parent);
new_parent_sd = new_parent_kobj->sd ? new_parent_kobj->sd : &sysfs_root;
+ error = 0;
+ if (sd->s_parent == new_parent_sd)
+ goto out; /* nothing to move */
+
/* get dentries */
old_dentry = sysfs_get_dentry(sd);
if (IS_ERR(old_dentry)) {
error = PTR_ERR(old_dentry);
- goto out_dput;
+ goto out;
}
- old_parent = sd->s_parent->s_dentry;
+ old_parent = old_dentry->d_parent;
new_parent = sysfs_get_dentry(new_parent_sd);
if (IS_ERR(new_parent)) {
error = PTR_ERR(new_parent);
- goto out_dput;
+ goto out;
}
- if (old_parent->d_inode == new_parent->d_inode) {
- error = 0;
- goto out_dput; /* nothing to move */
- }
again:
mutex_lock(&old_parent->d_inode->i_mutex);
if (!mutex_trylock(&new_parent->d_inode->i_mutex)) {
mutex_unlock(&old_parent->d_inode->i_mutex);
goto again;
}
+ mutex_lock(&sysfs_mutex);
- new_dentry = lookup_one_len(kobj->name, new_parent, strlen(kobj->name));
- if (IS_ERR(new_dentry)) {
- error = PTR_ERR(new_dentry);
+ error = -EEXIST;
+ if (sysfs_find_dirent(new_parent_sd, sd->s_name))
goto out_unlock;
- } else
- error = 0;
+
+ error = -ENOMEM;
+ new_dentry = d_alloc_name(new_parent, sd->s_name);
+ if (!new_dentry)
+ goto out_unlock;
+
+ error = 0;
d_add(new_dentry, NULL);
- d_move(sd->s_dentry, new_dentry);
+ d_move(old_dentry, new_dentry);
dput(new_dentry);
/* Remove from old parent's list and insert into new parent's list. */
- mutex_lock(&sysfs_mutex);
-
sysfs_unlink_sibling(sd);
sysfs_get(new_parent_sd);
sysfs_put(sd->s_parent);
sd->s_parent = new_parent_sd;
sysfs_link_sibling(sd);
- mutex_unlock(&sysfs_mutex);
-
out_unlock:
+ mutex_unlock(&sysfs_mutex);
mutex_unlock(&new_parent->d_inode->i_mutex);
mutex_unlock(&old_parent->d_inode->i_mutex);
- out_dput:
+ out:
dput(new_parent);
dput(old_dentry);
dput(new_dentry);
+ mutex_unlock(&sysfs_rename_mutex);
return error;
}
-static int sysfs_dir_open(struct inode *inode, struct file *file)
-{
- struct dentry * dentry = file->f_path.dentry;
- struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- struct sysfs_dirent * sd;
-
- sd = sysfs_new_dirent("_DIR_", 0, 0);
- if (sd) {
- mutex_lock(&sysfs_mutex);
- sd->s_parent = sysfs_get(parent_sd);
- sysfs_link_sibling(sd);
- mutex_unlock(&sysfs_mutex);
- }
-
- file->private_data = sd;
- return sd ? 0 : -ENOMEM;
-}
-
-static int sysfs_dir_close(struct inode *inode, struct file *file)
-{
- struct sysfs_dirent * cursor = file->private_data;
-
- mutex_lock(&sysfs_mutex);
- sysfs_unlink_sibling(cursor);
- mutex_unlock(&sysfs_mutex);
-
- release_sysfs_dirent(cursor);
-
- return 0;
-}
-
/* Relationship between s_mode and the DT_xxx types */
static inline unsigned char dt_type(struct sysfs_dirent *sd)
{
@@ -1079,232 +910,51 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct dentry *dentry = filp->f_path.dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- struct sysfs_dirent *cursor = filp->private_data;
- struct sysfs_dirent **pos;
+ struct sysfs_dirent *pos;
ino_t ino;
- int i = filp->f_pos;
- switch (i) {
- case 0:
- ino = parent_sd->s_ino;
- if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
- break;
+ if (filp->f_pos == 0) {
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
filp->f_pos++;
- i++;
- /* fallthrough */
- case 1:
- if (parent_sd->s_parent)
- ino = parent_sd->s_parent->s_ino;
- else
- ino = parent_sd->s_ino;
- if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
- break;
+ }
+ if (filp->f_pos == 1) {
+ if (parent_sd->s_parent)
+ ino = parent_sd->s_parent->s_ino;
+ else
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
filp->f_pos++;
- i++;
- /* fallthrough */
- default:
- mutex_lock(&sysfs_mutex);
-
- pos = &parent_sd->s_children;
- while (*pos != cursor)
- pos = &(*pos)->s_sibling;
-
- /* unlink cursor */
- *pos = cursor->s_sibling;
-
- if (filp->f_pos == 2)
- pos = &parent_sd->s_children;
-
- for ( ; *pos; pos = &(*pos)->s_sibling) {
- struct sysfs_dirent *next = *pos;
- const char * name;
- int len;
-
- if (!sysfs_type(next))
- continue;
-
- name = next->s_name;
- len = strlen(name);
- ino = next->s_ino;
-
- if (filldir(dirent, name, len, filp->f_pos, ino,
- dt_type(next)) < 0)
- break;
-
- filp->f_pos++;
- }
+ }
+ if ((filp->f_pos > 1) && (filp->f_pos < INT_MAX)) {
+ mutex_lock(&sysfs_mutex);
- /* put cursor back in */
- cursor->s_sibling = *pos;
- *pos = cursor;
+ /* Skip the dentries we have already reported */
+ pos = parent_sd->s_dir.children;
+ while (pos && (filp->f_pos > pos->s_ino))
+ pos = pos->s_sibling;
- mutex_unlock(&sysfs_mutex);
- }
- return 0;
-}
+ for ( ; pos; pos = pos->s_sibling) {
+ const char * name;
+ int len;
-static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
-{
- struct dentry * dentry = file->f_path.dentry;
+ name = pos->s_name;
+ len = strlen(name);
+ filp->f_pos = ino = pos->s_ino;
- switch (origin) {
- case 1:
- offset += file->f_pos;
- case 0:
- if (offset >= 0)
+ if (filldir(dirent, name, len, filp->f_pos, ino,
+ dt_type(pos)) < 0)
break;
- default:
- return -EINVAL;
- }
- if (offset != file->f_pos) {
- mutex_lock(&sysfs_mutex);
-
- file->f_pos = offset;
- if (file->f_pos >= 2) {
- struct sysfs_dirent *sd = dentry->d_fsdata;
- struct sysfs_dirent *cursor = file->private_data;
- struct sysfs_dirent **pos;
- loff_t n = file->f_pos - 2;
-
- sysfs_unlink_sibling(cursor);
-
- pos = &sd->s_children;
- while (n && *pos) {
- struct sysfs_dirent *next = *pos;
- if (sysfs_type(next))
- n--;
- pos = &(*pos)->s_sibling;
- }
-
- cursor->s_sibling = *pos;
- *pos = cursor;
}
-
+ if (!pos)
+ filp->f_pos = INT_MAX;
mutex_unlock(&sysfs_mutex);
}
-
- return offset;
-}
-
-
-/**
- * sysfs_make_shadowed_dir - Setup so a directory can be shadowed
- * @kobj: object we're creating shadow of.
- */
-
-int sysfs_make_shadowed_dir(struct kobject *kobj,
- void * (*follow_link)(struct dentry *, struct nameidata *))
-{
- struct dentry *dentry;
- struct inode *inode;
- struct inode_operations *i_op;
-
- /* get dentry for @kobj->sd, dentry of a shadowed dir is pinned */
- dentry = sysfs_get_dentry(kobj->sd);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
-
- inode = dentry->d_inode;
- if (inode->i_op != &sysfs_dir_inode_operations) {
- dput(dentry);
- return -EINVAL;
- }
-
- i_op = kmalloc(sizeof(*i_op), GFP_KERNEL);
- if (!i_op)
- return -ENOMEM;
-
- memcpy(i_op, &sysfs_dir_inode_operations, sizeof(*i_op));
- i_op->follow_link = follow_link;
-
- /* Locking of inode->i_op?
- * Since setting i_op is a single word write and they
- * are atomic we should be ok here.
- */
- inode->i_op = i_op;
return 0;
}
-/**
- * sysfs_create_shadow_dir - create a shadow directory for an object.
- * @kobj: object we're creating directory for.
- *
- * sysfs_make_shadowed_dir must already have been called on this
- * directory.
- */
-
-struct sysfs_dirent *sysfs_create_shadow_dir(struct kobject *kobj)
-{
- struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
- struct dentry *dir, *parent, *shadow;
- struct inode *inode;
- struct sysfs_dirent *sd;
- struct sysfs_addrm_cxt acxt;
-
- dir = sysfs_get_dentry(kobj->sd);
- if (IS_ERR(dir)) {
- sd = (void *)dir;
- goto out;
- }
- parent = dir->d_parent;
-
- inode = dir->d_inode;
- sd = ERR_PTR(-EINVAL);
- if (!sysfs_is_shadowed_inode(inode))
- goto out_dput;
-
- shadow = d_alloc(parent, &dir->d_name);
- if (!shadow)
- goto nomem;
-
- sd = sysfs_new_dirent("_SHADOW_", inode->i_mode, SYSFS_DIR);
- if (!sd)
- goto nomem;
- sd->s_elem.dir.kobj = kobj;
-
- sysfs_addrm_start(&acxt, parent_sd);
-
- /* add but don't link into children list */
- sysfs_add_one(&acxt, sd);
-
- /* attach and instantiate dentry */
- sysfs_attach_dentry(sd, shadow);
- d_instantiate(shadow, igrab(inode));
- inc_nlink(inode); /* tj: synchronization? */
-
- sysfs_addrm_finish(&acxt);
-
- dget(shadow); /* Extra count - pin the dentry in core */
-
- goto out_dput;
-
- nomem:
- dput(shadow);
- sd = ERR_PTR(-ENOMEM);
- out_dput:
- dput(dir);
- out:
- return sd;
-}
-
-/**
- * sysfs_remove_shadow_dir - remove an object's directory.
- * @shadow_sd: sysfs_dirent of shadow directory
- *
- * The only thing special about this is that we remove any files in
- * the directory before we remove the directory, and we've inlined
- * what used to be sysfs_rmdir() below, instead of calling separately.
- */
-
-void sysfs_remove_shadow_dir(struct sysfs_dirent *shadow_sd)
-{
- __sysfs_remove_dir(shadow_sd);
-}
const struct file_operations sysfs_dir_operations = {
- .open = sysfs_dir_open,
- .release = sysfs_dir_close,
- .llseek = sysfs_dir_lseek,
.read = generic_read_dir,
.readdir = sysfs_readdir,
};
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index cc49799..d3be1e7 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -1,15 +1,22 @@
/*
- * file.c - operations for regular (text) files.
+ * fs/sysfs/file.c - sysfs regular (text) file implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#include <linux/module.h>
-#include <linux/fsnotify.h>
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
@@ -50,14 +57,33 @@ static struct sysfs_ops subsys_sysfs_ops = {
.store = subsys_attr_store,
};
+/*
+ * There's one sysfs_buffer for each open file and one
+ * sysfs_open_dirent for each sysfs_dirent with one or more open
+ * files.
+ *
+ * filp->private_data points to sysfs_buffer and
+ * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open
+ * is protected by sysfs_open_dirent_lock.
+ */
+static spinlock_t sysfs_open_dirent_lock = SPIN_LOCK_UNLOCKED;
+
+struct sysfs_open_dirent {
+ atomic_t refcnt;
+ atomic_t event;
+ wait_queue_head_t poll;
+ struct list_head buffers; /* goes through sysfs_buffer.list */
+};
+
struct sysfs_buffer {
size_t count;
loff_t pos;
char * page;
struct sysfs_ops * ops;
- struct semaphore sem;
+ struct mutex mutex;
int needs_read_fill;
int event;
+ struct list_head list;
};
/**
@@ -74,7 +100,7 @@ struct sysfs_buffer {
static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_ops * ops = buffer->ops;
int ret = 0;
ssize_t count;
@@ -88,8 +114,8 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
- buffer->event = atomic_read(&attr_sd->s_event);
- count = ops->show(kobj, attr_sd->s_elem.attr.attr, buffer->page);
+ buffer->event = atomic_read(&attr_sd->s_attr.open->event);
+ count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
sysfs_put_active_two(attr_sd);
@@ -128,7 +154,7 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct sysfs_buffer * buffer = file->private_data;
ssize_t retval = 0;
- down(&buffer->sem);
+ mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
retval = fill_read_buffer(file->f_path.dentry,buffer);
if (retval)
@@ -139,7 +165,7 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
buffer->count);
out:
- up(&buffer->sem);
+ mutex_unlock(&buffer->mutex);
return retval;
}
@@ -189,7 +215,7 @@ static int
flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_ops * ops = buffer->ops;
int rc;
@@ -197,7 +223,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
if (!sysfs_get_active_two(attr_sd))
return -ENODEV;
- rc = ops->store(kobj, attr_sd->s_elem.attr.attr, buffer->page, count);
+ rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
sysfs_put_active_two(attr_sd);
@@ -228,20 +254,102 @@ sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t
struct sysfs_buffer * buffer = file->private_data;
ssize_t len;
- down(&buffer->sem);
+ mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
len = flush_write_buffer(file->f_path.dentry, buffer, len);
if (len > 0)
*ppos += len;
- up(&buffer->sem);
+ mutex_unlock(&buffer->mutex);
return len;
}
+/**
+ * sysfs_get_open_dirent - get or create sysfs_open_dirent
+ * @sd: target sysfs_dirent
+ * @buffer: sysfs_buffer for this instance of open
+ *
+ * If @sd->s_attr.open exists, increment its reference count;
+ * otherwise, create one. @buffer is chained to the buffers
+ * list.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
+ struct sysfs_buffer *buffer)
+{
+ struct sysfs_open_dirent *od, *new_od = NULL;
+
+ retry:
+ spin_lock(&sysfs_open_dirent_lock);
+
+ if (!sd->s_attr.open && new_od) {
+ sd->s_attr.open = new_od;
+ new_od = NULL;
+ }
+
+ od = sd->s_attr.open;
+ if (od) {
+ atomic_inc(&od->refcnt);
+ list_add_tail(&buffer->list, &od->buffers);
+ }
+
+ spin_unlock(&sysfs_open_dirent_lock);
+
+ if (od) {
+ kfree(new_od);
+ return 0;
+ }
+
+ /* not there, initialize a new one and retry */
+ new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
+ if (!new_od)
+ return -ENOMEM;
+
+ atomic_set(&new_od->refcnt, 0);
+ atomic_set(&new_od->event, 1);
+ init_waitqueue_head(&new_od->poll);
+ INIT_LIST_HEAD(&new_od->buffers);
+ goto retry;
+}
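The retry loop above is the common "allocate outside the lock, publish under it, free the loser" idiom: GFP_KERNEL allocations may sleep, so they cannot happen while sysfs_open_dirent_lock is held. A stripped-down sketch of the same idea with a hypothetical object type (illustrative only, not part of this patch):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* hypothetical per-node state, standing in for sysfs_open_dirent */
struct foo_state {
	int users;
};

static DEFINE_SPINLOCK(foo_lock);

static int foo_get_state(struct foo_state **slot)
{
	struct foo_state *new_fs = NULL;

 retry:
	spin_lock(&foo_lock);
	if (!*slot && new_fs) {
		*slot = new_fs;		/* publish our allocation */
		new_fs = NULL;
	}
	if (*slot) {
		(*slot)->users++;
		spin_unlock(&foo_lock);
		kfree(new_fs);		/* lost the race, drop the spare */
		return 0;
	}
	spin_unlock(&foo_lock);

	/* GFP_KERNEL may sleep, so allocate with the lock dropped and retry */
	new_fs = kzalloc(sizeof(*new_fs), GFP_KERNEL);
	if (!new_fs)
		return -ENOMEM;
	goto retry;
}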
+
+/**
+ * sysfs_put_open_dirent - put sysfs_open_dirent
+ * @sd: target sysfs_dirent
+ * @buffer: associated sysfs_buffer
+ *
+ * Put @sd->s_attr.open and unlink @buffer from the buffers list.
+ * If reference count reaches zero, disassociate and free it.
+ *
+ * LOCKING:
+ * None.
+ */
+static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
+ struct sysfs_buffer *buffer)
+{
+ struct sysfs_open_dirent *od = sd->s_attr.open;
+
+ spin_lock(&sysfs_open_dirent_lock);
+
+ list_del(&buffer->list);
+ if (atomic_dec_and_test(&od->refcnt))
+ sd->s_attr.open = NULL;
+ else
+ od = NULL;
+
+ spin_unlock(&sysfs_open_dirent_lock);
+
+ kfree(od);
+}
+
static int sysfs_open_file(struct inode *inode, struct file *file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_buffer * buffer;
struct sysfs_ops * ops = NULL;
int error;
@@ -294,33 +402,38 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
if (!buffer)
goto err_out;
- init_MUTEX(&buffer->sem);
+ mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->ops = ops;
file->private_data = buffer;
- /* open succeeded, put active references and pin attr_sd */
+ /* make sure we have open dirent struct */
+ error = sysfs_get_open_dirent(attr_sd, buffer);
+ if (error)
+ goto err_free;
+
+ /* open succeeded, put active references */
sysfs_put_active_two(attr_sd);
- sysfs_get(attr_sd);
return 0;
+ err_free:
+ kfree(buffer);
err_out:
sysfs_put_active_two(attr_sd);
return error;
}
-static int sysfs_release(struct inode * inode, struct file * filp)
+static int sysfs_release(struct inode *inode, struct file *filp)
{
- struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
+ struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
struct sysfs_buffer *buffer = filp->private_data;
- sysfs_put(attr_sd);
+ sysfs_put_open_dirent(sd, buffer);
+
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ kfree(buffer);
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- kfree(buffer);
- }
return 0;
}
@@ -335,24 +448,24 @@ static int sysfs_release(struct inode * inode, struct file * filp)
* again will not get new data, or reset the state of 'poll'.
* Reminder: this only works for attributes which actively support
* it, and it is not possible to test an attribute from userspace
- * to see if it supports poll (Nether 'poll' or 'select' return
+ * to see if it supports poll (Neither 'poll' nor 'select' return
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
struct sysfs_buffer * buffer = filp->private_data;
struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
- struct kobject *kobj = attr_sd->s_parent->s_elem.dir.kobj;
+ struct sysfs_open_dirent *od = attr_sd->s_attr.open;
/* need parent for the kobj, grab both */
if (!sysfs_get_active_two(attr_sd))
goto trigger;
- poll_wait(filp, &kobj->poll, wait);
+ poll_wait(filp, &od->poll, wait);
sysfs_put_active_two(attr_sd);
- if (buffer->event != atomic_read(&attr_sd->s_event))
+ if (buffer->event != atomic_read(&od->event))
goto trigger;
return 0;
@@ -373,8 +486,17 @@ void sysfs_notify(struct kobject *k, char *dir, char *attr)
if (sd && attr)
sd = sysfs_find_dirent(sd, attr);
if (sd) {
- atomic_inc(&sd->s_event);
- wake_up_interruptible(&k->poll);
+ struct sysfs_open_dirent *od;
+
+ spin_lock(&sysfs_open_dirent_lock);
+
+ od = sd->s_attr.open;
+ if (od) {
+ atomic_inc(&od->event);
+ wake_up_interruptible(&od->poll);
+ }
+
+ spin_unlock(&sysfs_open_dirent_lock);
}
mutex_unlock(&sysfs_mutex);
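Together, sysfs_poll() and sysfs_notify() let userspace block until an attribute changes: the driver bumps the per-dirent event count and wakes the poll queue, and the waiter sees POLLERR|POLLPRI. A minimal userspace sketch (illustrative only; the attribute path and its driver are hypothetical):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	/* hypothetical attribute whose driver calls sysfs_notify() */
	int fd = open("/sys/class/foo/bar/state", O_RDONLY);

	if (fd < 0)
		return 1;

	/* an initial read records the current event count for this open file */
	read(fd, buf, sizeof(buf));

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		if (poll(&pfd, 1, -1) <= 0)
			continue;

		/* rewind and re-read to pick up the new value */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("changed: %s", buf);
		}
	}
	return 0;
}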
@@ -397,24 +519,21 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
+ int rc;
sd = sysfs_new_dirent(attr->name, mode, type);
if (!sd)
return -ENOMEM;
- sd->s_elem.attr.attr = (void *)attr;
+ sd->s_attr.attr = (void *)attr;
sysfs_addrm_start(&acxt, dir_sd);
+ rc = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
- if (!sysfs_find_dirent(dir_sd, attr->name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
+ if (rc)
+ sysfs_put(sd);
- if (sysfs_addrm_finish(&acxt))
- return 0;
-
- sysfs_put(sd);
- return -EEXIST;
+ return rc;
}
@@ -456,42 +575,6 @@ int sysfs_add_file_to_group(struct kobject *kobj,
}
EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
-
-/**
- * sysfs_update_file - update the modified timestamp on an object attribute.
- * @kobj: object we're acting for.
- * @attr: attribute descriptor.
- */
-int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
-{
- struct sysfs_dirent *victim_sd = NULL;
- struct dentry *victim = NULL;
- int rc;
-
- rc = -ENOENT;
- victim_sd = sysfs_get_dirent(kobj->sd, attr->name);
- if (!victim_sd)
- goto out;
-
- victim = sysfs_get_dentry(victim_sd);
- if (IS_ERR(victim)) {
- rc = PTR_ERR(victim);
- victim = NULL;
- goto out;
- }
-
- mutex_lock(&victim->d_inode->i_mutex);
- victim->d_inode->i_mtime = CURRENT_TIME;
- fsnotify_modify(victim);
- mutex_unlock(&victim->d_inode->i_mutex);
- rc = 0;
- out:
- dput(victim);
- sysfs_put(victim_sd);
- return rc;
-}
-
-
/**
* sysfs_chmod_file - update the modified mode value on an object attribute.
* @kobj: object we're acting for.
@@ -512,7 +595,9 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
if (!victim_sd)
goto out;
+ mutex_lock(&sysfs_rename_mutex);
victim = sysfs_get_dentry(victim_sd);
+ mutex_unlock(&sysfs_rename_mutex);
if (IS_ERR(victim)) {
rc = PTR_ERR(victim);
victim = NULL;
@@ -520,10 +605,19 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
}
inode = victim->d_inode;
+
mutex_lock(&inode->i_mutex);
+
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
rc = notify_change(victim, &newattrs);
+
+ if (rc == 0) {
+ mutex_lock(&sysfs_mutex);
+ victim_sd->s_mode = newattrs.ia_mode;
+ mutex_unlock(&sysfs_mutex);
+ }
+
mutex_unlock(&inode->i_mutex);
out:
dput(victim);
@@ -631,4 +725,3 @@ EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
-EXPORT_SYMBOL_GPL(sysfs_update_file);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index f318b73..d197237 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -13,8 +13,6 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>
-#include <linux/fs.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 3756e15..9236635 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -1,7 +1,11 @@
/*
- * inode.c - basic inode and dentry operations.
+ * fs/sysfs/inode.c - basic sysfs inode and dentry operations
*
- * sysfs is Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
*
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
@@ -14,7 +18,6 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
extern struct super_block * sysfs_sb;
@@ -34,16 +37,6 @@ static const struct inode_operations sysfs_inode_operations ={
.setattr = sysfs_setattr,
};
-void sysfs_delete_inode(struct inode *inode)
-{
- /* Free the shadowed directory inode operations */
- if (sysfs_is_shadowed_inode(inode)) {
- kfree(inode->i_op);
- inode->i_op = NULL;
- }
- return generic_delete_inode(inode);
-}
-
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
struct inode * inode = dentry->d_inode;
@@ -133,8 +126,22 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
*/
static struct lock_class_key sysfs_inode_imutex_key;
-void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
+static int sysfs_count_nlink(struct sysfs_dirent *sd)
+{
+ struct sysfs_dirent *child;
+ int nr = 0;
+
+ for (child = sd->s_dir.children; child; child = child->s_sibling)
+ if (sysfs_type(child) == SYSFS_DIR)
+ nr++;
+
+ return nr + 2;
+}
+
+static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
+ struct bin_attribute *bin_attr;
+
inode->i_blocks = 0;
inode->i_mapping->a_ops = &sysfs_aops;
inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
@@ -150,6 +157,32 @@ void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
set_inode_attr(inode, sd->s_iattr);
} else
set_default_inode_attr(inode, sd->s_mode);
+
+
+ /* initialize inode according to type */
+ switch (sysfs_type(sd)) {
+ case SYSFS_DIR:
+ inode->i_op = &sysfs_dir_inode_operations;
+ inode->i_fop = &sysfs_dir_operations;
+ inode->i_nlink = sysfs_count_nlink(sd);
+ break;
+ case SYSFS_KOBJ_ATTR:
+ inode->i_size = PAGE_SIZE;
+ inode->i_fop = &sysfs_file_operations;
+ break;
+ case SYSFS_KOBJ_BIN_ATTR:
+ bin_attr = sd->s_bin_attr.bin_attr;
+ inode->i_size = bin_attr->size;
+ inode->i_fop = &bin_fops;
+ break;
+ case SYSFS_KOBJ_LINK:
+ inode->i_op = &sysfs_symlink_inode_operations;
+ break;
+ default:
+ BUG();
+ }
+
+ unlock_new_inode(inode);
}
/**
@@ -177,50 +210,24 @@ struct inode * sysfs_get_inode(struct sysfs_dirent *sd)
return inode;
}
-/**
- * sysfs_instantiate - instantiate dentry
- * @dentry: dentry to be instantiated
- * @inode: inode associated with @sd
- *
- * Unlock @inode if locked and instantiate @dentry with @inode.
- *
- * LOCKING:
- * None.
- */
-void sysfs_instantiate(struct dentry *dentry, struct inode *inode)
-{
- BUG_ON(!dentry || dentry->d_inode);
-
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
-
- d_instantiate(dentry, inode);
-}
-
int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
{
struct sysfs_addrm_cxt acxt;
- struct sysfs_dirent **pos, *sd;
+ struct sysfs_dirent *sd;
if (!dir_sd)
return -ENOENT;
sysfs_addrm_start(&acxt, dir_sd);
- for (pos = &dir_sd->s_children; *pos; pos = &(*pos)->s_sibling) {
- sd = *pos;
-
- if (!sysfs_type(sd))
- continue;
- if (!strcmp(sd->s_name, name)) {
- *pos = sd->s_sibling;
- sd->s_sibling = NULL;
- sysfs_remove_one(&acxt, sd);
- break;
- }
- }
+ sd = sysfs_find_dirent(dir_sd, name);
+ if (sd)
+ sysfs_remove_one(&acxt, sd);
+
+ sysfs_addrm_finish(&acxt);
- if (sysfs_addrm_finish(&acxt))
+ if (sd)
return 0;
- return -ENOENT;
+ else
+ return -ENOENT;
}
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 402cc35..c76c540 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -1,5 +1,13 @@
/*
- * mount.c - operations for initializing and mounting sysfs.
+ * fs/sysfs/mount.c - operations for initializing and mounting sysfs
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#define DEBUG
@@ -8,25 +16,25 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
-#include <asm/semaphore.h>
#include "sysfs.h"
/* Random magic number */
#define SYSFS_MAGIC 0x62656572
-struct vfsmount *sysfs_mount;
+static struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
struct kmem_cache *sysfs_dir_cachep;
static const struct super_operations sysfs_ops = {
.statfs = simple_statfs,
- .drop_inode = sysfs_delete_inode,
+ .drop_inode = generic_delete_inode,
};
struct sysfs_dirent sysfs_root = {
+ .s_name = "",
.s_count = ATOMIC_INIT(1),
- .s_flags = SYSFS_ROOT,
+ .s_flags = SYSFS_DIR,
.s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
.s_ino = 1,
};
@@ -43,26 +51,20 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sysfs_sb = sb;
- inode = new_inode(sysfs_sb);
+ /* get root inode, initialize and unlock it */
+ inode = sysfs_get_inode(&sysfs_root);
if (!inode) {
pr_debug("sysfs: could not get root inode\n");
return -ENOMEM;
}
- sysfs_init_inode(&sysfs_root, inode);
-
- inode->i_op = &sysfs_dir_inode_operations;
- inode->i_fop = &sysfs_dir_operations;
- /* directory inodes start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
-
+ /* instantiate and link root dentry */
root = d_alloc_root(inode);
if (!root) {
pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
iput(inode);
return -ENOMEM;
}
- sysfs_root.s_dentry = root;
root->d_fsdata = &sysfs_root;
sb->s_root = root;
return 0;
@@ -77,7 +79,7 @@ static int sysfs_get_sb(struct file_system_type *fs_type,
static struct file_system_type sysfs_fs_type = {
.name = "sysfs",
.get_sb = sysfs_get_sb,
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
int __init sysfs_init(void)
@@ -86,7 +88,7 @@ int __init sysfs_init(void)
sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
sizeof(struct sysfs_dirent),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sysfs_dir_cachep)
goto out;
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 2f86e04..3eac20c 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -1,5 +1,13 @@
/*
- * symlink.c - operations for sysfs symlinks.
+ * fs/sysfs/symlink.c - sysfs symlink implementation
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
*/
#include <linux/fs.h>
@@ -7,7 +15,7 @@
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/namei.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "sysfs.h"
@@ -60,10 +68,9 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
BUG_ON(!name);
- if (!kobj) {
- if (sysfs_mount && sysfs_mount->mnt_sb)
- parent_sd = sysfs_mount->mnt_sb->s_root->d_fsdata;
- } else
+ if (!kobj)
+ parent_sd = &sysfs_root;
+ else
parent_sd = kobj->sd;
error = -EFAULT;
@@ -86,20 +93,19 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
sd = sysfs_new_dirent(name, S_IFLNK|S_IRWXUGO, SYSFS_KOBJ_LINK);
if (!sd)
goto out_put;
- sd->s_elem.symlink.target_sd = target_sd;
+
+ sd->s_symlink.target_sd = target_sd;
+ target_sd = NULL; /* reference is now owned by the symlink */
sysfs_addrm_start(&acxt, parent_sd);
+ error = sysfs_add_one(&acxt, sd);
+ sysfs_addrm_finish(&acxt);
- if (!sysfs_find_dirent(parent_sd, name)) {
- sysfs_add_one(&acxt, sd);
- sysfs_link_sibling(sd);
- }
+ if (error)
+ goto out_put;
- if (sysfs_addrm_finish(&acxt))
- return 0;
+ return 0;
- error = -EEXIST;
- /* fall through */
out_put:
sysfs_put(target_sd);
sysfs_put(sd);
@@ -144,7 +150,7 @@ static int sysfs_getlink(struct dentry *dentry, char * path)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
struct sysfs_dirent *parent_sd = sd->s_parent;
- struct sysfs_dirent *target_sd = sd->s_elem.symlink.target_sd;
+ struct sysfs_dirent *target_sd = sd->s_symlink.target_sd;
int error;
mutex_lock(&sysfs_mutex);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6a37f23..f0326f2 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,20 +1,39 @@
+/*
+ * fs/sysfs/sysfs.h - sysfs internal header file
+ *
+ * Copyright (c) 2001-3 Patrick Mochel
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+struct sysfs_open_dirent;
+
+/* type-specific structures for sysfs_dirent->s_* union members */
struct sysfs_elem_dir {
- struct kobject * kobj;
+ struct kobject *kobj;
+ /* children list starts here and goes through sd->s_sibling */
+ struct sysfs_dirent *children;
};
struct sysfs_elem_symlink {
- struct sysfs_dirent * target_sd;
+ struct sysfs_dirent *target_sd;
};
struct sysfs_elem_attr {
- struct attribute * attr;
+ struct attribute *attr;
+ struct sysfs_open_dirent *open;
};
struct sysfs_elem_bin_attr {
- struct bin_attribute * bin_attr;
+ struct bin_attribute *bin_attr;
};
/*
+ * sysfs_dirent - the building block of sysfs hierarchy. Each and
+ * every sysfs node is represented by a single sysfs_dirent.
+ *
* As long as s_count reference is held, the sysfs_dirent itself is
* accessible. Dereferencing s_elem or any other outer entity
* requires s_active reference.
@@ -22,28 +41,43 @@ struct sysfs_elem_bin_attr {
struct sysfs_dirent {
atomic_t s_count;
atomic_t s_active;
- struct sysfs_dirent * s_parent;
- struct sysfs_dirent * s_sibling;
- struct sysfs_dirent * s_children;
- const char * s_name;
+ struct sysfs_dirent *s_parent;
+ struct sysfs_dirent *s_sibling;
+ const char *s_name;
union {
- struct sysfs_elem_dir dir;
- struct sysfs_elem_symlink symlink;
- struct sysfs_elem_attr attr;
- struct sysfs_elem_bin_attr bin_attr;
- } s_elem;
+ struct sysfs_elem_dir s_dir;
+ struct sysfs_elem_symlink s_symlink;
+ struct sysfs_elem_attr s_attr;
+ struct sysfs_elem_bin_attr s_bin_attr;
+ };
unsigned int s_flags;
- umode_t s_mode;
ino_t s_ino;
- struct dentry * s_dentry;
- struct iattr * s_iattr;
- atomic_t s_event;
+ umode_t s_mode;
+ struct iattr *s_iattr;
};
-#define SD_DEACTIVATED_BIAS INT_MIN
+#define SD_DEACTIVATED_BIAS INT_MIN
+
+#define SYSFS_TYPE_MASK 0x00ff
+#define SYSFS_DIR 0x0001
+#define SYSFS_KOBJ_ATTR 0x0002
+#define SYSFS_KOBJ_BIN_ATTR 0x0004
+#define SYSFS_KOBJ_LINK 0x0008
+#define SYSFS_COPY_NAME (SYSFS_DIR | SYSFS_KOBJ_LINK)
+
+#define SYSFS_FLAG_MASK ~SYSFS_TYPE_MASK
+#define SYSFS_FLAG_REMOVED 0x0200
+
+static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
+{
+ return sd->s_flags & SYSFS_TYPE_MASK;
+}
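With the union now anonymous and the node type kept in the low byte of s_flags, sysfs_type() is what decides which union member may be dereferenced. An illustrative accessor (not part of this patch) showing the intended pattern:

/* fetch the kobject backing a directory node, if it is one */
static struct kobject *foo_dir_kobj(struct sysfs_dirent *sd)
{
	if (sysfs_type(sd) != SYSFS_DIR)
		return NULL;
	return sd->s_dir.kobj;
}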
+/*
+ * Context structure to be used while adding/removing nodes.
+ */
struct sysfs_addrm_cxt {
struct sysfs_dirent *parent_sd;
struct inode *parent_inode;
@@ -51,64 +85,47 @@ struct sysfs_addrm_cxt {
int cnt;
};
-extern struct vfsmount * sysfs_mount;
+/*
+ * mount.c
+ */
extern struct sysfs_dirent sysfs_root;
+extern struct super_block *sysfs_sb;
extern struct kmem_cache *sysfs_dir_cachep;
-extern struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd);
-extern void sysfs_link_sibling(struct sysfs_dirent *sd);
-extern void sysfs_unlink_sibling(struct sysfs_dirent *sd);
-extern struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
-extern void sysfs_put_active(struct sysfs_dirent *sd);
-extern struct sysfs_dirent *sysfs_get_active_two(struct sysfs_dirent *sd);
-extern void sysfs_put_active_two(struct sysfs_dirent *sd);
-extern void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *parent_sd);
-extern void sysfs_add_one(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd);
-extern void sysfs_remove_one(struct sysfs_addrm_cxt *acxt,
- struct sysfs_dirent *sd);
-extern int sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
-
-extern void sysfs_delete_inode(struct inode *inode);
-extern void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode);
-extern struct inode * sysfs_get_inode(struct sysfs_dirent *sd);
-extern void sysfs_instantiate(struct dentry *dentry, struct inode *inode);
-
-extern void release_sysfs_dirent(struct sysfs_dirent * sd);
-extern struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name);
-extern struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const unsigned char *name);
-extern struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode,
- int type);
-
-extern int sysfs_add_file(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type);
-extern int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
-extern struct sysfs_dirent *sysfs_find(struct sysfs_dirent *dir, const char * name);
-
-extern int sysfs_create_subdir(struct kobject *kobj, const char *name,
- struct sysfs_dirent **p_sd);
-extern void sysfs_remove_subdir(struct sysfs_dirent *sd);
-
-extern int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
-
-extern spinlock_t sysfs_assoc_lock;
+/*
+ * dir.c
+ */
extern struct mutex sysfs_mutex;
-extern struct super_block * sysfs_sb;
+extern struct mutex sysfs_rename_mutex;
+extern spinlock_t sysfs_assoc_lock;
+
extern const struct file_operations sysfs_dir_operations;
-extern const struct file_operations sysfs_file_operations;
-extern const struct file_operations bin_fops;
extern const struct inode_operations sysfs_dir_inode_operations;
-extern const struct inode_operations sysfs_symlink_inode_operations;
-
-static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
-{
- return sd->s_flags & SYSFS_TYPE_MASK;
-}
-static inline struct sysfs_dirent * sysfs_get(struct sysfs_dirent * sd)
+struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd);
+void sysfs_put_active(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_active_two(struct sysfs_dirent *sd);
+void sysfs_put_active_two(struct sysfs_dirent *sd);
+void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
+ struct sysfs_dirent *parent_sd);
+int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
+void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd);
+void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt);
+
+struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type);
+
+void release_sysfs_dirent(struct sysfs_dirent *sd);
+
+int sysfs_create_subdir(struct kobject *kobj, const char *name,
+ struct sysfs_dirent **p_sd);
+void sysfs_remove_subdir(struct sysfs_dirent *sd);
+
+static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
{
if (sd) {
WARN_ON(!atomic_read(&sd->s_count));
@@ -117,13 +134,33 @@ static inline struct sysfs_dirent * sysfs_get(struct sysfs_dirent * sd)
return sd;
}
-static inline void sysfs_put(struct sysfs_dirent * sd)
+static inline void sysfs_put(struct sysfs_dirent *sd)
{
if (sd && atomic_dec_and_test(&sd->s_count))
release_sysfs_dirent(sd);
}
-static inline int sysfs_is_shadowed_inode(struct inode *inode)
-{
- return S_ISDIR(inode->i_mode) && inode->i_op->follow_link;
-}
+/*
+ * inode.c
+ */
+struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
+int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
+
+/*
+ * file.c
+ */
+extern const struct file_operations sysfs_file_operations;
+
+int sysfs_add_file(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type);
+
+/*
+ * bin.c
+ */
+extern const struct file_operations bin_fops;
+
+/*
+ * symlink.c
+ */
+extern const struct inode_operations sysfs_symlink_inode_operations;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 5644116..7c4e5d3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -342,7 +342,7 @@ int __init sysv_init_icache(void)
sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
sizeof(struct sysv_inode_info), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
- init_once, NULL);
+ init_once);
if (!sysv_inode_cachep)
return -ENOMEM;
return 0;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index af9eca5..61983f3 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -95,7 +95,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
{
struct timerfd_ctx *ctx = file->private_data;
ssize_t res;
- u32 ticks = 0;
+ u64 ticks = 0;
DECLARE_WAITQUEUE(wait, current);
if (count < sizeof(ticks))
@@ -130,7 +130,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
* callback to avoid DoS attacks specifying a very
* short timer period.
*/
- ticks = (u32)
+ ticks = (u64)
hrtimer_forward(&ctx->tmr,
hrtimer_cb_get_time(&ctx->tmr),
ctx->tintv);
@@ -140,7 +140,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
}
spin_unlock_irq(&ctx->wqh.lock);
if (ticks)
- res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks);
+ res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
return res;
}
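The u32 to u64 widening above matches the userspace contract: a read(2) on a timerfd returns an 8-byte expiration count, and the count check at the top of timerfd_read() rejects smaller buffers. A minimal sketch, assuming the timerfd_create()/timerfd_settime() glibc wrappers (these wrappers postdate this patch; contemporary userspace invoked the raw syscall):

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },	/* first expiry after 1s */
		.it_interval = { .tv_sec = 1 },	/* then every second */
	};
	uint64_t ticks;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
		return 1;

	/* the kernel hands back an 8-byte count of expirations */
	while (read(fd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("expired %llu time(s)\n", (unsigned long long)ticks);

	return 0;
}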
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 4cec910..87e87dc 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -41,18 +41,17 @@
#define uint(x) xuint(x)
#define xuint(x) __le ## x
-static inline int find_next_one_bit (void * addr, int size, int offset)
+static inline int find_next_one_bit(void *addr, int size, int offset)
{
- uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
- int result = offset & ~(BITS_PER_LONG-1);
+ uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
+ int result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
- offset &= (BITS_PER_LONG-1);
- if (offset)
- {
+ offset &= (BITS_PER_LONG - 1);
+ if (offset) {
tmp = leBPL_to_cpup(p++);
tmp &= ~0UL << offset;
if (size < BITS_PER_LONG)
@@ -62,8 +61,7 @@ static inline int find_next_one_bit (void * addr, int size, int offset)
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
- while (size & ~(BITS_PER_LONG-1))
- {
+ while (size & ~(BITS_PER_LONG - 1)) {
if ((tmp = leBPL_to_cpup(p++)))
goto found_middle;
result += BITS_PER_LONG;
@@ -73,7 +71,7 @@ static inline int find_next_one_bit (void * addr, int size, int offset)
return result;
tmp = leBPL_to_cpup(p);
found_first:
- tmp &= ~0UL >> (BITS_PER_LONG-size);
+ tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
return result + ffz(~tmp);
}
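find_next_one_bit() scans a little-endian on-disk bitmap a word at a time, returning size when no set bit remains; in UDF's space bitmap a set bit marks a free block. An illustrative helper (not part of this patch) showing how the scan macros (find_first_one_bit is defined just below) are typically walked:

/* count the free blocks recorded in one bitmap buffer (illustrative) */
static int udf_count_free_bits(void *bmap, int nbits)
{
	int count = 0;
	int bit = find_first_one_bit(bmap, nbits);

	while (bit < nbits) {
		count++;
		bit = find_next_one_bit(bmap, nbits, bit + 1);
	}
	return count;
}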
@@ -81,8 +79,9 @@ found_middle:
#define find_first_one_bit(addr, size)\
find_next_one_bit((addr), (size), 0)
-static int read_block_bitmap(struct super_block * sb,
- struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
+static int read_block_bitmap(struct super_block *sb,
+ struct udf_bitmap *bitmap, unsigned int block,
+ unsigned long bitmap_nr)
{
struct buffer_head *bh = NULL;
int retval = 0;
@@ -92,38 +91,39 @@ static int read_block_bitmap(struct super_block * sb,
loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
- if (!bh)
- {
+ if (!bh) {
retval = -EIO;
}
bitmap->s_block_bitmap[bitmap_nr] = bh;
return retval;
}
-static int __load_block_bitmap(struct super_block * sb,
- struct udf_bitmap *bitmap, unsigned int block_group)
+static int __load_block_bitmap(struct super_block *sb,
+ struct udf_bitmap *bitmap,
+ unsigned int block_group)
{
int retval = 0;
int nr_groups = bitmap->s_nr_groups;
- if (block_group >= nr_groups)
- {
- udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups);
+ if (block_group >= nr_groups) {
+ udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
+ nr_groups);
}
- if (bitmap->s_block_bitmap[block_group])
+ if (bitmap->s_block_bitmap[block_group]) {
return block_group;
- else
- {
- retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+ } else {
+ retval = read_block_bitmap(sb, bitmap, block_group,
+ block_group);
if (retval < 0)
return retval;
return block_group;
}
}
-static inline int load_block_bitmap(struct super_block * sb,
- struct udf_bitmap *bitmap, unsigned int block_group)
+static inline int load_block_bitmap(struct super_block *sb,
+ struct udf_bitmap *bitmap,
+ unsigned int block_group)
{
int slot;
@@ -138,13 +138,14 @@ static inline int load_block_bitmap(struct super_block * sb,
return slot;
}
-static void udf_bitmap_free_blocks(struct super_block * sb,
- struct inode * inode,
- struct udf_bitmap *bitmap,
- kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+static void udf_bitmap_free_blocks(struct super_block *sb,
+ struct inode *inode,
+ struct udf_bitmap *bitmap,
+ kernel_lb_addr bloc, uint32_t offset,
+ uint32_t count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
- struct buffer_head * bh = NULL;
+ struct buffer_head *bh = NULL;
unsigned long block;
unsigned long block_group;
unsigned long bit;
@@ -154,11 +155,10 @@ static void udf_bitmap_free_blocks(struct super_block * sb,
mutex_lock(&sbi->s_alloc_mutex);
if (bloc.logicalBlockNum < 0 ||
- (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
- {
+ (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+ bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+ UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
goto error_return;
}
@@ -172,8 +172,7 @@ do_more:
/*
* Check to see if we are freeing blocks across a group boundary.
*/
- if (bit + count > (sb->s_blocksize << 3))
- {
+ if (bit + count > (sb->s_blocksize << 3)) {
overflow = bit + count - (sb->s_blocksize << 3);
count -= overflow;
}
@@ -182,27 +181,21 @@ do_more:
goto error_return;
bh = bitmap->s_block_bitmap[bitmap_nr];
- for (i=0; i < count; i++)
- {
- if (udf_set_bit(bit + i, bh->b_data))
- {
+ for (i = 0; i < count; i++) {
+ if (udf_set_bit(bit + i, bh->b_data)) {
udf_debug("bit %ld already set\n", bit + i);
udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
- }
- else
- {
+ } else {
if (inode)
DQUOT_FREE_BLOCK(inode, 1);
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
}
}
}
mark_buffer_dirty(bh);
- if (overflow)
- {
+ if (overflow) {
block += count;
count = overflow;
goto do_more;
@@ -215,10 +208,11 @@ error_return:
return;
}
-static int udf_bitmap_prealloc_blocks(struct super_block * sb,
- struct inode * inode,
- struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
- uint32_t block_count)
+static int udf_bitmap_prealloc_blocks(struct super_block *sb,
+ struct inode *inode,
+ struct udf_bitmap *bitmap,
+ uint16_t partition, uint32_t first_block,
+ uint32_t block_count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
int alloc_count = 0;
@@ -235,7 +229,8 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb,
repeat:
nr_groups = (UDF_SB_PARTLEN(sb, partition) +
- (sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
+ (sizeof(struct spaceBitmapDesc) << 3) +
+ (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
block_group = block >> (sb->s_blocksize_bits + 3);
group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
@@ -247,31 +242,28 @@ repeat:
bit = block % (sb->s_blocksize << 3);
- while (bit < (sb->s_blocksize << 3) && block_count > 0)
- {
- if (!udf_test_bit(bit, bh->b_data))
+ while (bit < (sb->s_blocksize << 3) && block_count > 0) {
+ if (!udf_test_bit(bit, bh->b_data)) {
goto out;
- else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+ } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
goto out;
- else if (!udf_clear_bit(bit, bh->b_data))
- {
+ } else if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
DQUOT_FREE_BLOCK(inode, 1);
goto out;
}
- block_count --;
- alloc_count ++;
- bit ++;
- block ++;
+ block_count--;
+ alloc_count++;
+ bit++;
+ block++;
}
mark_buffer_dirty(bh);
if (block_count > 0)
goto repeat;
out:
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
}
sb->s_dirt = 1;
@@ -279,12 +271,13 @@ out:
return alloc_count;
}
-static int udf_bitmap_new_block(struct super_block * sb,
- struct inode * inode,
- struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
+static int udf_bitmap_new_block(struct super_block *sb,
+ struct inode *inode,
+ struct udf_bitmap *bitmap, uint16_t partition,
+ uint32_t goal, int *err)
{
struct udf_sb_info *sbi = UDF_SB(sb);
- int newbit, bit=0, block, block_group, group_start;
+ int newbit, bit = 0, block, block_group, group_start;
int end_goal, nr_groups, bitmap_nr, i;
struct buffer_head *bh = NULL;
char *ptr;
@@ -306,38 +299,35 @@ repeat:
if (bitmap_nr < 0)
goto error_return;
bh = bitmap->s_block_bitmap[bitmap_nr];
- ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
+ ptr = memscan((char *)bh->b_data + group_start, 0xFF,
+ sb->s_blocksize - group_start);
- if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
- {
+ if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
bit = block % (sb->s_blocksize << 3);
-
if (udf_test_bit(bit, bh->b_data))
- {
goto got_block;
- }
+
end_goal = (bit + 63) & ~63;
bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
if (bit < end_goal)
goto got_block;
+
ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
newbit = (ptr - ((char *)bh->b_data)) << 3;
- if (newbit < sb->s_blocksize << 3)
- {
+ if (newbit < sb->s_blocksize << 3) {
bit = newbit;
goto search_back;
}
+
newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
- if (newbit < sb->s_blocksize << 3)
- {
+ if (newbit < sb->s_blocksize << 3) {
bit = newbit;
goto got_block;
}
}
- for (i=0; i<(nr_groups*2); i++)
- {
- block_group ++;
+ for (i = 0; i < (nr_groups * 2); i++) {
+ block_group++;
if (block_group >= nr_groups)
block_group = 0;
group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
@@ -346,24 +336,22 @@ repeat:
if (bitmap_nr < 0)
goto error_return;
bh = bitmap->s_block_bitmap[bitmap_nr];
- if (i < nr_groups)
- {
- ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
- if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
- {
+ if (i < nr_groups) {
+ ptr = memscan((char *)bh->b_data + group_start, 0xFF,
+ sb->s_blocksize - group_start);
+ if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
bit = (ptr - ((char *)bh->b_data)) << 3;
break;
}
- }
- else
- {
- bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
+ } else {
+ bit = udf_find_next_one_bit((char *)bh->b_data,
+ sb->s_blocksize << 3,
+ group_start << 3);
if (bit < sb->s_blocksize << 3)
break;
}
}
- if (i >= (nr_groups*2))
- {
+ if (i >= (nr_groups * 2)) {
mutex_unlock(&sbi->s_alloc_mutex);
return newblock;
}
@@ -371,22 +359,21 @@ repeat:
goto search_back;
else
bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
- if (bit >= sb->s_blocksize << 3)
- {
+ if (bit >= sb->s_blocksize << 3) {
mutex_unlock(&sbi->s_alloc_mutex);
return 0;
}
search_back:
- for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);
+ for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
+ ; /* empty loop */
got_block:
/*
* Check quota for allocation of this block.
*/
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
- {
+ if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
@@ -395,18 +382,16 @@ got_block:
newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
(sizeof(struct spaceBitmapDesc) << 3);
- if (!udf_clear_bit(bit, bh->b_data))
- {
+ if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
goto repeat;
}
mark_buffer_dirty(bh);
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
}
sb->s_dirt = 1;
@@ -420,10 +405,11 @@ error_return:
return 0;
}
-static void udf_table_free_blocks(struct super_block * sb,
- struct inode * inode,
- struct inode * table,
- kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+static void udf_table_free_blocks(struct super_block *sb,
+ struct inode *inode,
+ struct inode *table,
+ kernel_lb_addr bloc, uint32_t offset,
+ uint32_t count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
uint32_t start, end;
@@ -435,11 +421,10 @@ static void udf_table_free_blocks(struct super_block * sb,
mutex_lock(&sbi->s_alloc_mutex);
if (bloc.logicalBlockNum < 0 ||
- (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
- {
+ (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+ bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+ UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
goto error_return;
}
@@ -447,10 +432,9 @@ static void udf_table_free_blocks(struct super_block * sb,
but.. oh well */
if (inode)
DQUOT_FREE_BLOCK(inode, count);
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
}
@@ -462,74 +446,59 @@ static void udf_table_free_blocks(struct super_block * sb,
epos.block = oepos.block = UDF_I_LOCATION(table);
epos.bh = oepos.bh = NULL;
- while (count && (etype =
- udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
- {
- if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
- start))
- {
- if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
- {
+ while (count &&
+ (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+ if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
+ if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
- }
- else
- {
- elen = (etype << 30) |
- (elen + (count << sb->s_blocksize_bits));
+ } else {
+ elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
start += count;
count = 0;
}
udf_write_aext(table, &oepos, eloc, elen, 1);
- }
- else if (eloc.logicalBlockNum == (end + 1))
- {
- if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
- {
+ } else if (eloc.logicalBlockNum == (end + 1)) {
+ if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
- eloc.logicalBlockNum -=
- ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+ eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
- }
- else
- {
+ } else {
eloc.logicalBlockNum = start;
- elen = (etype << 30) |
- (elen + (count << sb->s_blocksize_bits));
+ elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
end -= count;
count = 0;
}
udf_write_aext(table, &oepos, eloc, elen, 1);
}
- if (epos.bh != oepos.bh)
- {
+ if (epos.bh != oepos.bh) {
i = -1;
oepos.block = epos.block;
brelse(oepos.bh);
get_bh(epos.bh);
oepos.bh = epos.bh;
oepos.offset = 0;
- }
- else
+ } else {
oepos.offset = epos.offset;
+ }
}
- if (count)
- {
- /* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
- a new block, and since we hold the super block lock already
- very bad things would happen :)
-
- We copy the behavior of udf_add_aext, but instead of
- trying to allocate a new block close to the existing one,
- we just steal a block from the extent we are trying to add.
-
- It would be nice if the blocks were close together, but it
- isn't required.
- */
+ if (count) {
+ /*
+ * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
+ * a new block, and since we hold the super block lock already
+ * very bad things would happen :)
+ *
+ * We copy the behavior of udf_add_aext, but instead of
+ * trying to allocate a new block close to the existing one,
+ * we just steal a block from the extent we are trying to add.
+ *
+ * It would be nice if the blocks were close together, but it
+ * isn't required.
+ */
int adsize;
short_ad *sad = NULL;
@@ -540,115 +509,94 @@ static void udf_table_free_blocks(struct super_block * sb,
elen = EXT_RECORDED_ALLOCATED |
(count << sb->s_blocksize_bits);
- if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
+ if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
adsize = sizeof(short_ad);
- else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
+ } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
adsize = sizeof(long_ad);
- else
- {
+ } else {
brelse(oepos.bh);
brelse(epos.bh);
goto error_return;
}
- if (epos.offset + (2 * adsize) > sb->s_blocksize)
- {
+ if (epos.offset + (2 * adsize) > sb->s_blocksize) {
char *sptr, *dptr;
int loffset;
-
+
brelse(oepos.bh);
oepos = epos;
/* Steal a block from the extent being free'd */
epos.block.logicalBlockNum = eloc.logicalBlockNum;
- eloc.logicalBlockNum ++;
+ eloc.logicalBlockNum++;
elen -= sb->s_blocksize;
- if (!(epos.bh = udf_tread(sb,
- udf_get_lb_pblock(sb, epos.block, 0))))
- {
+ if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
brelse(oepos.bh);
goto error_return;
}
aed = (struct allocExtDesc *)(epos.bh->b_data);
aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
- if (epos.offset + adsize > sb->s_blocksize)
- {
+ if (epos.offset + adsize > sb->s_blocksize) {
loffset = epos.offset;
aed->lengthAllocDescs = cpu_to_le32(adsize);
- sptr = UDF_I_DATA(inode) + epos.offset -
- udf_file_entry_alloc_offset(inode) +
- UDF_I_LENEATTR(inode) - adsize;
+ sptr = UDF_I_DATA(table) + epos.offset - adsize;
dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
memcpy(dptr, sptr, adsize);
epos.offset = sizeof(struct allocExtDesc) + adsize;
- }
- else
- {
+ } else {
loffset = epos.offset + adsize;
aed->lengthAllocDescs = cpu_to_le32(0);
- sptr = oepos.bh->b_data + epos.offset;
- epos.offset = sizeof(struct allocExtDesc);
-
- if (oepos.bh)
- {
+ if (oepos.bh) {
+ sptr = oepos.bh->b_data + epos.offset;
aed = (struct allocExtDesc *)oepos.bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
- }
- else
- {
+ } else {
+ sptr = UDF_I_DATA(table) + epos.offset;
UDF_I_LENALLOC(table) += adsize;
mark_inode_dirty(table);
}
+ epos.offset = sizeof(struct allocExtDesc);
}
if (UDF_SB_UDFREV(sb) >= 0x0200)
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
- epos.block.logicalBlockNum, sizeof(tag));
+ epos.block.logicalBlockNum, sizeof(tag));
else
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
- epos.block.logicalBlockNum, sizeof(tag));
- switch (UDF_I_ALLOCTYPE(table))
- {
+ epos.block.logicalBlockNum, sizeof(tag));
+
+ switch (UDF_I_ALLOCTYPE(table)) {
case ICBTAG_FLAG_AD_SHORT:
- {
sad = (short_ad *)sptr;
sad->extLength = cpu_to_le32(
EXT_NEXT_EXTENT_ALLOCDECS |
sb->s_blocksize);
sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
break;
- }
case ICBTAG_FLAG_AD_LONG:
- {
lad = (long_ad *)sptr;
lad->extLength = cpu_to_le32(
EXT_NEXT_EXTENT_ALLOCDECS |
sb->s_blocksize);
lad->extLocation = cpu_to_lelb(epos.block);
break;
- }
}
- if (oepos.bh)
- {
+ if (oepos.bh) {
udf_update_tag(oepos.bh->b_data, loffset);
mark_buffer_dirty(oepos.bh);
- }
- else
+ } else {
mark_inode_dirty(table);
+ }
}
- if (elen) /* It's possible that stealing the block emptied the extent */
- {
+ if (elen) { /* It's possible that stealing the block emptied the extent */
udf_write_aext(table, &epos, eloc, elen, 1);
- if (!epos.bh)
- {
+ if (!epos.bh) {
UDF_I_LENALLOC(table) += adsize;
mark_inode_dirty(table);
- }
- else
- {
+ } else {
aed = (struct allocExtDesc *)epos.bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
@@ -667,10 +615,10 @@ error_return:
return;
}
-static int udf_table_prealloc_blocks(struct super_block * sb,
- struct inode * inode,
- struct inode *table, uint16_t partition, uint32_t first_block,
- uint32_t block_count)
+static int udf_table_prealloc_blocks(struct super_block *sb,
+ struct inode *inode,
+ struct inode *table, uint16_t partition,
+ uint32_t first_block, uint32_t block_count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
int alloc_count = 0;
@@ -695,40 +643,36 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
epos.bh = NULL;
eloc.logicalBlockNum = 0xFFFFFFFF;
- while (first_block != eloc.logicalBlockNum && (etype =
- udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
- {
+ while (first_block != eloc.logicalBlockNum &&
+ (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
udf_debug("eloc=%d, elen=%d, first_block=%d\n",
- eloc.logicalBlockNum, elen, first_block);
+ eloc.logicalBlockNum, elen, first_block);
; /* empty loop body */
}
- if (first_block == eloc.logicalBlockNum)
- {
+ if (first_block == eloc.logicalBlockNum) {
epos.offset -= adsize;
alloc_count = (elen >> sb->s_blocksize_bits);
- if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
+ if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
alloc_count = 0;
- else if (alloc_count > block_count)
- {
+ } else if (alloc_count > block_count) {
alloc_count = block_count;
eloc.logicalBlockNum += alloc_count;
elen -= (alloc_count << sb->s_blocksize_bits);
udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
- }
- else
+ } else {
udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
- }
- else
+ }
+ } else {
alloc_count = 0;
+ }
brelse(epos.bh);
- if (alloc_count && UDF_SB_LVIDBH(sb))
- {
+ if (alloc_count && UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
sb->s_dirt = 1;
}
@@ -736,9 +680,10 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
return alloc_count;
}
-static int udf_table_new_block(struct super_block * sb,
- struct inode * inode,
- struct inode *table, uint16_t partition, uint32_t goal, int *err)
+static int udf_table_new_block(struct super_block *sb,
+ struct inode *inode,
+ struct inode *table, uint16_t partition,
+ uint32_t goal, int *err)
{
struct udf_sb_info *sbi = UDF_SB(sb);
uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
@@ -765,30 +710,26 @@ static int udf_table_new_block(struct super_block * sb,
we stop. Otherwise we keep going till we run out of extents.
We store the buffer_head, bloc, and extoffset of the current closest
match and use that when we are done.
- */
+ */
epos.offset = sizeof(struct unallocSpaceEntry);
epos.block = UDF_I_LOCATION(table);
epos.bh = goal_epos.bh = NULL;
- while (spread && (etype =
- udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
- {
- if (goal >= eloc.logicalBlockNum)
- {
+ while (spread &&
+ (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+ if (goal >= eloc.logicalBlockNum) {
if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
nspread = 0;
else
nspread = goal - eloc.logicalBlockNum -
(elen >> sb->s_blocksize_bits);
- }
- else
+ } else {
nspread = eloc.logicalBlockNum - goal;
+ }
- if (nspread < spread)
- {
+ if (nspread < spread) {
spread = nspread;
- if (goal_epos.bh != epos.bh)
- {
+ if (goal_epos.bh != epos.bh) {
brelse(goal_epos.bh);
goal_epos.bh = epos.bh;
get_bh(goal_epos.bh);
@@ -802,8 +743,7 @@ static int udf_table_new_block(struct super_block * sb,
brelse(epos.bh);
- if (spread == 0xFFFFFFFF)
- {
+ if (spread == 0xFFFFFFFF) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
return 0;
@@ -815,11 +755,10 @@ static int udf_table_new_block(struct super_block * sb,
/* This works, but very poorly.... */
newblock = goal_eloc.logicalBlockNum;
- goal_eloc.logicalBlockNum ++;
+ goal_eloc.logicalBlockNum++;
goal_elen -= sb->s_blocksize;
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
- {
+ if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
@@ -832,10 +771,9 @@ static int udf_table_new_block(struct super_block * sb,
udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
brelse(goal_epos.bh);
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
UDF_SB_LVID(sb)->freeSpaceTable[partition] =
- cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
+ cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
}
@@ -845,105 +783,84 @@ static int udf_table_new_block(struct super_block * sb,
return newblock;
}
-inline void udf_free_blocks(struct super_block * sb,
- struct inode * inode,
- kernel_lb_addr bloc, uint32_t offset, uint32_t count)
+inline void udf_free_blocks(struct super_block *sb,
+ struct inode *inode,
+ kernel_lb_addr bloc, uint32_t offset,
+ uint32_t count)
{
uint16_t partition = bloc.partitionReferenceNum;
- if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
- {
+ if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
return udf_bitmap_free_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
- bloc, offset, count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+ bloc, offset, count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_free_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
- bloc, offset, count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+ bloc, offset, count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_free_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
- bloc, offset, count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+ bloc, offset, count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_free_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
- bloc, offset, count);
- }
- else
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+ bloc, offset, count);
+ } else {
return;
+ }
}
-inline int udf_prealloc_blocks(struct super_block * sb,
- struct inode * inode,
- uint16_t partition, uint32_t first_block, uint32_t block_count)
+inline int udf_prealloc_blocks(struct super_block *sb,
+ struct inode *inode,
+ uint16_t partition, uint32_t first_block,
+ uint32_t block_count)
{
- if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
- {
+ if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
return udf_bitmap_prealloc_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
- partition, first_block, block_count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+ partition, first_block, block_count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_prealloc_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
- partition, first_block, block_count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+ partition, first_block, block_count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_prealloc_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
- partition, first_block, block_count);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+ partition, first_block, block_count);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_prealloc_blocks(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
- partition, first_block, block_count);
- }
- else
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+ partition, first_block, block_count);
+ } else {
return 0;
+ }
}
-inline int udf_new_block(struct super_block * sb,
- struct inode * inode,
- uint16_t partition, uint32_t goal, int *err)
+inline int udf_new_block(struct super_block *sb,
+ struct inode *inode,
+ uint16_t partition, uint32_t goal, int *err)
{
int ret;
- if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
- {
+ if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
ret = udf_bitmap_new_block(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
- partition, goal, err);
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+ partition, goal, err);
return ret;
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
- {
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
return udf_table_new_block(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
- partition, goal, err);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+ partition, goal, err);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
return udf_bitmap_new_block(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
- partition, goal, err);
- }
- else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+ partition, goal, err);
+ } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
return udf_table_new_block(sb, inode,
- UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
- partition, goal, err);
- }
- else
- {
+ UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+ partition, goal, err);
+ } else {
*err = -EIO;
return 0;
}
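
Editor's note: the freeSpaceTable updates reindented in the balloc.c hunks above all follow one pattern — the on-disk counter is a __le32, so it is decoded with le32_to_cpu(), adjusted, and re-encoded with cpu_to_le32(). A minimal sketch of that round-trip, using only the standard kernel endianness helpers (the helper name below is made up for illustration, not a driver function):

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Adjust a little-endian on-disk counter by delta without breaking
 * big-endian hosts; a plain "*counter += delta" on the raw __le32
 * value would only be correct on little-endian CPUs.
 */
static inline void udf_adjust_le32(__le32 *counter, int delta)
{
	*counter = cpu_to_le32(le32_to_cpu(*counter) + delta);
}
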
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
index ef2bfaa..85aaee5 100644
--- a/fs/udf/crc.c
+++ b/fs/udf/crc.c
@@ -79,8 +79,7 @@ static uint16_t crc_table[256] = {
* July 21, 1997 - Andrew E. Mileski
* Adapted from OSTA-UDF(tm) 1.50 standard.
*/
-uint16_t
-udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
+uint16_t udf_crc(uint8_t * data, uint32_t size, uint16_t crc)
{
while (size--)
crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
@@ -138,7 +137,7 @@ int main(int argc, char **argv)
/* Get the polynomial */
sscanf(argv[1], "%lo", &poly);
- if (poly & 0xffff0000U){
+ if (poly & 0xffff0000U) {
fprintf(stderr, "polynomial is too large\en");
exit(1);
}
@@ -147,22 +146,22 @@ int main(int argc, char **argv)
/* Create a table */
printf("static unsigned short crc_table[256] = {\n");
- for (n = 0; n < 256; n++){
+ for (n = 0; n < 256; n++) {
if (n % 8 == 0)
printf("\t");
crc = n << 8;
- for (i = 0; i < 8; i++){
- if(crc & 0x8000U)
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x8000U)
crc = (crc << 1) ^ poly;
else
crc <<= 1;
- crc &= 0xFFFFU;
+ crc &= 0xFFFFU;
}
if (n == 255)
printf("0x%04xU ", crc);
else
printf("0x%04xU, ", crc);
- if(n % 8 == 7)
+ if (n % 8 == 7)
printf("\n");
}
printf("};\n");
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index e45f86b..9e3b9f9 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -82,14 +82,12 @@ int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
lock_kernel();
- if ( filp->f_pos == 0 )
- {
- if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0)
- {
+ if (filp->f_pos == 0) {
+ if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
unlock_kernel();
return 0;
}
- filp->f_pos ++;
+ filp->f_pos++;
}
result = do_udf_readdir(dir, filp, filldir, dirent);
@@ -97,11 +95,12 @@ int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
return result;
}
-static int
-do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *dirent)
+static int
+do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
+ void *dirent)
{
struct udf_fileident_bh fibh;
- struct fileIdentDesc *fi=NULL;
+ struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
loff_t nf_pos = filp->f_pos - 1;
@@ -117,7 +116,7 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
sector_t offset;
int i, num;
unsigned int dt_type;
- struct extent_position epos = { NULL, 0, {0, 0}};
+ struct extent_position epos = { NULL, 0, {0, 0} };
if (nf_pos >= size)
return 0;
@@ -126,64 +125,54 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
nf_pos = (udf_ext0_offset(dir) >> 2);
fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
fibh.sbh = fibh.ebh = NULL;
- else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
- &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
- {
+ } else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
+ &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
- {
+ if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(short_ad);
else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(long_ad);
- }
- else
+ } else {
offset = 0;
+ }
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
- {
+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
brelse(epos.bh);
return -EIO;
}
-
- if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
- {
+
+ if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
- if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
- i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
- for (num=0; i>0; i--)
- {
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
+ if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
+ i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+ for (num = 0; i > 0; i--) {
+ block = udf_get_lb_pblock(dir->i_sb, eloc, offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
brelse(tmp);
}
- if (num)
- {
+ if (num) {
ll_rw_block(READA, num, bha);
- for (i=0; i<num; i++)
+ for (i = 0; i < num; i++)
brelse(bha[i]);
}
}
- }
- else
- {
+ } else {
brelse(epos.bh);
return -ENOENT;
}
- while ( nf_pos < size )
- {
+ while (nf_pos < size) {
filp->f_pos = nf_pos + 1;
- fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset);
-
- if (!fi)
- {
+ fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
+ &elen, &offset);
+ if (!fi) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -194,45 +183,40 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
liu = le16_to_cpu(cfi.lengthOfImpUse);
lfi = cfi.lengthFileIdent;
- if (fibh.sbh == fibh.ebh)
+ if (fibh.sbh == fibh.ebh) {
nameptr = fi->fileIdent + liu;
- else
- {
+ } else {
int poffset; /* Unpadded ending offset */
poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
- if (poffset >= lfi)
+ if (poffset >= lfi) {
nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
- else
- {
+ } else {
nameptr = fname;
- memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
- memcpy(nameptr + lfi - poffset, fibh.ebh->b_data, poffset);
+ memcpy(nameptr, fi->fileIdent + liu,
+ lfi - poffset);
+ memcpy(nameptr + lfi - poffset,
+ fibh.ebh->b_data, poffset);
}
}
- if ( (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
- {
- if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
+ if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+ if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
continue;
}
-
- if ( (cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
- {
- if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+
+ if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
+ if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
continue;
}
- if ( cfi.fileCharacteristics & FID_FILE_CHAR_PARENT )
- {
+ if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
iblock = parent_ino(filp->f_path.dentry);
flen = 2;
memcpy(fname, "..", flen);
dt_type = DT_DIR;
- }
- else
- {
+ } else {
kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
iblock = udf_get_lb_pblock(dir->i_sb, tloc, 0);
@@ -240,10 +224,8 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
dt_type = DT_UNKNOWN;
}
- if (flen)
- {
- if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0)
- {
+ if (flen) {
+ if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
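
Editor's note: the read-ahead block reindented above (and repeated in udf_fileident_read() below) batches up to 16 of the extent's following blocks into a single ll_rw_block(READA, ...) submission, skipping buffers that are already up to date or under I/O. Condensed into a stand-alone sketch — udf_readahead_extent() is a made-up name, not a driver function, and the clamping of nr is an assumption:

#include <linux/buffer_head.h>
#include "udfdecl.h"	/* udf_tgetblk(), udf_get_lb_pblock(), kernel_lb_addr */

static void udf_readahead_extent(struct super_block *sb, kernel_lb_addr eloc,
				 sector_t offset, int nr)
{
	struct buffer_head *tmp, *bha[16];
	int i, num = 0;

	if (nr > 16)
		nr = 16;
	/* Collect only buffers that still need I/O and are not in flight. */
	for (i = nr; i > 0; i--) {
		tmp = udf_tgetblk(sb, udf_get_lb_pblock(sb, eloc, offset + i));
		if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
			bha[num++] = tmp;
		else
			brelse(tmp);
	}
	if (num) {
		/* Fire-and-forget read-ahead; the later read hits the cache. */
		ll_rw_block(READA, num, bha);
		for (i = 0; i < num; i++)
			brelse(bha[i]);
	}
}
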
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 198caa3..ff8c08f 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -19,10 +19,10 @@
#include <linux/buffer_head.h>
#if 0
-static uint8_t *
-udf_filead_read(struct inode *dir, uint8_t *tmpad, uint8_t ad_size,
- kernel_lb_addr fe_loc, int *pos, int *offset,
- struct buffer_head **bh, int *error)
+static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
+ uint8_t ad_size, kernel_lb_addr fe_loc,
+ int *pos, int *offset, struct buffer_head **bh,
+ int *error)
{
int loffset = *offset;
int block;
@@ -34,24 +34,20 @@ udf_filead_read(struct inode *dir, uint8_t *tmpad, uint8_t ad_size,
ad = (uint8_t *)(*bh)->b_data + *offset;
*offset += ad_size;
- if (!ad)
- {
+ if (!ad) {
brelse(*bh);
*error = 1;
return NULL;
}
- if (*offset == dir->i_sb->s_blocksize)
- {
+ if (*offset == dir->i_sb->s_blocksize) {
brelse(*bh);
block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
if (!block)
return NULL;
if (!(*bh = udf_tread(dir->i_sb, block)))
return NULL;
- }
- else if (*offset > dir->i_sb->s_blocksize)
- {
+ } else if (*offset > dir->i_sb->s_blocksize) {
ad = tmpad;
remainder = dir->i_sb->s_blocksize - loffset;
@@ -67,53 +63,51 @@ udf_filead_read(struct inode *dir, uint8_t *tmpad, uint8_t ad_size,
memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder);
*offset = ad_size - remainder;
}
+
return ad;
}
#endif
-struct fileIdentDesc *
-udf_fileident_read(struct inode *dir, loff_t *nf_pos,
- struct udf_fileident_bh *fibh,
- struct fileIdentDesc *cfi,
- struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen,
- sector_t *offset)
+struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
+ struct udf_fileident_bh *fibh,
+ struct fileIdentDesc *cfi,
+ struct extent_position *epos,
+ kernel_lb_addr * eloc, uint32_t * elen,
+ sector_t * offset)
{
struct fileIdentDesc *fi;
int i, num, block;
- struct buffer_head * tmp, * bha[16];
+ struct buffer_head *tmp, *bha[16];
fibh->soffset = fibh->eoffset;
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
fi = udf_get_fileident(UDF_I_DATA(dir) -
- (UDF_I_EFE(dir) ?
- sizeof(struct extendedFileEntry) :
- sizeof(struct fileEntry)),
- dir->i_sb->s_blocksize, &(fibh->eoffset));
-
+ (UDF_I_EFE(dir) ?
+ sizeof(struct extendedFileEntry) :
+ sizeof(struct fileEntry)),
+ dir->i_sb->s_blocksize, &(fibh->eoffset));
if (!fi)
return NULL;
*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
- memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+ memcpy((uint8_t *)cfi, (uint8_t *)fi,
+ sizeof(struct fileIdentDesc));
return fi;
}
- if (fibh->eoffset == dir->i_sb->s_blocksize)
- {
+ if (fibh->eoffset == dir->i_sb->s_blocksize) {
int lextoffset = epos->offset;
if (udf_next_aext(dir, epos, eloc, elen, 1) !=
- (EXT_RECORDED_ALLOCATED >> 30))
+ (EXT_RECORDED_ALLOCATED >> 30))
return NULL;
block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
- (*offset) ++;
+ (*offset)++;
if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
*offset = 0;
@@ -125,57 +119,50 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
return NULL;
fibh->soffset = fibh->eoffset = 0;
- if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
- {
+ if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
- if (i+*offset > (*elen >> dir->i_sb->s_blocksize_bits))
+ if (i + *offset > (*elen >> dir->i_sb->s_blocksize_bits))
i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset;
- for (num=0; i>0; i--)
- {
- block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
+ for (num = 0; i > 0; i--) {
+ block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
brelse(tmp);
}
- if (num)
- {
+ if (num) {
ll_rw_block(READA, num, bha);
- for (i=0; i<num; i++)
+ for (i = 0; i < num; i++)
brelse(bha[i]);
}
}
- }
- else if (fibh->sbh != fibh->ebh)
- {
+ } else if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
fi = udf_get_fileident(fibh->sbh->b_data, dir->i_sb->s_blocksize,
- &(fibh->eoffset));
+ &(fibh->eoffset));
if (!fi)
return NULL;
*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
- if (fibh->eoffset <= dir->i_sb->s_blocksize)
- {
- memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
- }
- else if (fibh->eoffset > dir->i_sb->s_blocksize)
- {
+ if (fibh->eoffset <= dir->i_sb->s_blocksize) {
+ memcpy((uint8_t *)cfi, (uint8_t *)fi,
+ sizeof(struct fileIdentDesc));
+ } else if (fibh->eoffset > dir->i_sb->s_blocksize) {
int lextoffset = epos->offset;
if (udf_next_aext(dir, epos, eloc, elen, 1) !=
- (EXT_RECORDED_ALLOCATED >> 30))
+ (EXT_RECORDED_ALLOCATED >> 30))
return NULL;
block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
- (*offset) ++;
+ (*offset)++;
if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen)
*offset = 0;
@@ -188,62 +175,59 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
return NULL;
- if (sizeof(struct fileIdentDesc) > - fibh->soffset)
- {
+ if (sizeof(struct fileIdentDesc) > -fibh->soffset) {
int fi_len;
- memcpy((uint8_t *)cfi, (uint8_t *)fi, - fibh->soffset);
+ memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset);
memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data,
- sizeof(struct fileIdentDesc) + fibh->soffset);
+ sizeof(struct fileIdentDesc) + fibh->soffset);
fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent +
- le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
+ le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
*nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
fibh->eoffset = fibh->soffset + fi_len;
- }
- else
- {
- memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+ } else {
+ memcpy((uint8_t *)cfi, (uint8_t *)fi,
+ sizeof(struct fileIdentDesc));
}
}
return fi;
}
-struct fileIdentDesc *
-udf_get_fileident(void * buffer, int bufsize, int * offset)
+struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
{
struct fileIdentDesc *fi;
int lengthThisIdent;
- uint8_t * ptr;
+ uint8_t *ptr;
int padlen;
- if ( (!buffer) || (!offset) ) {
- udf_debug("invalidparms\n, buffer=%p, offset=%p\n", buffer, offset);
+ if ((!buffer) || (!offset)) {
+ udf_debug("invalidparms\n, buffer=%p, offset=%p\n", buffer,
+ offset);
return NULL;
}
ptr = buffer;
- if ( (*offset > 0) && (*offset < bufsize) ) {
+ if ((*offset > 0) && (*offset < bufsize)) {
ptr += *offset;
}
- fi=(struct fileIdentDesc *)ptr;
- if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID)
- {
+ fi = (struct fileIdentDesc *)ptr;
+ if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID) {
udf_debug("0x%x != TAG_IDENT_FID\n",
- le16_to_cpu(fi->descTag.tagIdent));
+ le16_to_cpu(fi->descTag.tagIdent));
udf_debug("offset: %u sizeof: %lu bufsize: %u\n",
- *offset, (unsigned long)sizeof(struct fileIdentDesc), bufsize);
+ *offset, (unsigned long)sizeof(struct fileIdentDesc),
+ bufsize);
return NULL;
}
- if ( (*offset + sizeof(struct fileIdentDesc)) > bufsize )
- {
+ if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) {
lengthThisIdent = sizeof(struct fileIdentDesc);
- }
- else
+ } else {
lengthThisIdent = sizeof(struct fileIdentDesc) +
fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
+ }
/* we need to figure padding, too! */
padlen = lengthThisIdent % UDF_NAME_PAD;
@@ -255,32 +239,28 @@ udf_get_fileident(void * buffer, int bufsize, int * offset)
}
#if 0
-static extent_ad *
-udf_get_fileextent(void * buffer, int bufsize, int * offset)
+static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
{
- extent_ad * ext;
+ extent_ad *ext;
struct fileEntry *fe;
- uint8_t * ptr;
+ uint8_t *ptr;
- if ( (!buffer) || (!offset) )
- {
+ if ((!buffer) || (!offset)) {
printk(KERN_ERR "udf: udf_get_fileextent() invalidparms\n");
return NULL;
}
fe = (struct fileEntry *)buffer;
- if ( le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE )
- {
+ if (le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE) {
udf_debug("0x%x != TAG_IDENT_FE\n",
- le16_to_cpu(fe->descTag.tagIdent));
+ le16_to_cpu(fe->descTag.tagIdent));
return NULL;
}
- ptr=(uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr);
+ ptr = (uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr);
- if ( (*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs)) )
- {
+ if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) {
ptr += *offset;
}
@@ -291,18 +271,17 @@ udf_get_fileextent(void * buffer, int bufsize, int * offset)
}
#endif
-short_ad *
-udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
+short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset,
+ int inc)
{
short_ad *sa;
- if ( (!ptr) || (!offset) )
- {
+ if ((!ptr) || (!offset)) {
printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
return NULL;
}
- if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
+ if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset))
return NULL;
else if ((sa = (short_ad *)ptr)->extLength == 0)
return NULL;
@@ -312,18 +291,16 @@ udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
return sa;
}
-long_ad *
-udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
+long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc)
{
long_ad *la;
- if ( (!ptr) || (!offset) )
- {
+ if ((!ptr) || (!offset)) {
printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
return NULL;
}
- if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
+ if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset))
return NULL;
else if ((la = (long_ad *)ptr)->extLength == 0)
return NULL;
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index f81f2eb..5638771 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -38,8 +38,7 @@
#define _ECMA_167_H 1
/* Character set specification (ECMA 167r3 1/7.2.1) */
-typedef struct
-{
+typedef struct {
uint8_t charSetType;
uint8_t charSetInfo[63];
} __attribute__ ((packed)) charspec;
@@ -58,8 +57,7 @@ typedef struct
typedef uint8_t dstring;
/* Timestamp (ECMA 167r3 1/7.3) */
-typedef struct
-{
+typedef struct {
__le16 typeAndTimezone;
__le16 year;
uint8_t month;
@@ -72,8 +70,7 @@ typedef struct
uint8_t microseconds;
} __attribute__ ((packed)) timestamp;
-typedef struct
-{
+typedef struct {
uint16_t typeAndTimezone;
int16_t year;
uint8_t month;
@@ -94,8 +91,7 @@ typedef struct
#define TIMESTAMP_TIMEZONE_MASK 0x0FFF
/* Entity identifier (ECMA 167r3 1/7.4) */
-typedef struct
-{
+typedef struct {
uint8_t flags;
uint8_t ident[23];
uint8_t identSuffix[8];
@@ -107,8 +103,7 @@ typedef struct
/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
#define VSD_STD_ID_LEN 5
-struct volStructDesc
-{
+struct volStructDesc {
uint8_t structType;
uint8_t stdIdent[VSD_STD_ID_LEN];
uint8_t structVersion;
@@ -127,8 +122,7 @@ struct volStructDesc
#define VSD_STD_ID_TEA01 "TEA01" /* (2/9.3) */
/* Beginning Extended Area Descriptor (ECMA 167r3 2/9.2) */
-struct beginningExtendedAreaDesc
-{
+struct beginningExtendedAreaDesc {
uint8_t structType;
uint8_t stdIdent[VSD_STD_ID_LEN];
uint8_t structVersion;
@@ -136,8 +130,7 @@ struct beginningExtendedAreaDesc
} __attribute__ ((packed));
/* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */
-struct terminatingExtendedAreaDesc
-{
+struct terminatingExtendedAreaDesc {
uint8_t structType;
uint8_t stdIdent[VSD_STD_ID_LEN];
uint8_t structVersion;
@@ -145,8 +138,7 @@ struct terminatingExtendedAreaDesc
} __attribute__ ((packed));
/* Boot Descriptor (ECMA 167r3 2/9.4) */
-struct bootDesc
-{
+struct bootDesc {
uint8_t structType;
uint8_t stdIdent[VSD_STD_ID_LEN];
uint8_t structVersion;
@@ -167,21 +159,18 @@ struct bootDesc
#define BOOT_FLAGS_ERASE 0x01
/* Extent Descriptor (ECMA 167r3 3/7.1) */
-typedef struct
-{
+typedef struct {
__le32 extLength;
__le32 extLocation;
} __attribute__ ((packed)) extent_ad;
-typedef struct
-{
+typedef struct {
uint32_t extLength;
uint32_t extLocation;
} kernel_extent_ad;
/* Descriptor Tag (ECMA 167r3 3/7.2) */
-typedef struct
-{
+typedef struct {
__le16 tagIdent;
__le16 descVersion;
uint8_t tagChecksum;
@@ -204,18 +193,16 @@ typedef struct
#define TAG_IDENT_LVID 0x0009
/* NSR Descriptor (ECMA 167r3 3/9.1) */
-struct NSRDesc
-{
+struct NSRDesc {
uint8_t structType;
uint8_t stdIdent[VSD_STD_ID_LEN];
uint8_t structVersion;
uint8_t reserved;
uint8_t structData[2040];
} __attribute__ ((packed));
-
+
/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
-struct primaryVolDesc
-{
+struct primaryVolDesc {
tag descTag;
__le32 volDescSeqNum;
__le32 primaryVolDescNum;
@@ -244,8 +231,7 @@ struct primaryVolDesc
#define PVD_FLAGS_VSID_COMMON 0x0001
/* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */
-struct anchorVolDescPtr
-{
+struct anchorVolDescPtr {
tag descTag;
extent_ad mainVolDescSeqExt;
extent_ad reserveVolDescSeqExt;
@@ -253,8 +239,7 @@ struct anchorVolDescPtr
} __attribute__ ((packed));
/* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */
-struct volDescPtr
-{
+struct volDescPtr {
tag descTag;
__le32 volDescSeqNum;
extent_ad nextVolDescSeqExt;
@@ -262,8 +247,7 @@ struct volDescPtr
} __attribute__ ((packed));
/* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */
-struct impUseVolDesc
-{
+struct impUseVolDesc {
tag descTag;
__le32 volDescSeqNum;
regid impIdent;
@@ -271,20 +255,19 @@ struct impUseVolDesc
} __attribute__ ((packed));
/* Partition Descriptor (ECMA 167r3 3/10.5) */
-struct partitionDesc
-{
- tag descTag;
- __le32 volDescSeqNum;
- __le16 partitionFlags;
- __le16 partitionNumber;
- regid partitionContents;
- uint8_t partitionContentsUse[128];
- __le32 accessType;
- __le32 partitionStartingLocation;
- __le32 partitionLength;
- regid impIdent;
- uint8_t impUse[128];
- uint8_t reserved[156];
+struct partitionDesc {
+ tag descTag;
+ __le32 volDescSeqNum;
+ __le16 partitionFlags;
+ __le16 partitionNumber;
+ regid partitionContents;
+ uint8_t partitionContentsUse[128];
+ __le32 accessType;
+ __le32 partitionStartingLocation;
+ __le32 partitionLength;
+ regid impIdent;
+ uint8_t impUse[128];
+ uint8_t reserved[156];
} __attribute__ ((packed));
/* Partition Flags (ECMA 167r3 3/10.5.3) */
@@ -307,8 +290,7 @@ struct partitionDesc
#define PD_ACCESS_TYPE_OVERWRITABLE 0x00000004
/* Logical Volume Descriptor (ECMA 167r3 3/10.6) */
-struct logicalVolDesc
-{
+struct logicalVolDesc {
tag descTag;
__le32 volDescSeqNum;
charspec descCharSet;
@@ -325,8 +307,7 @@ struct logicalVolDesc
} __attribute__ ((packed));
/* Generic Partition Map (ECMA 167r3 3/10.7.1) */
-struct genericPartitionMap
-{
+struct genericPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t partitionMapping[0];
@@ -338,8 +319,7 @@ struct genericPartitionMap
#define GP_PARTITION_MAP_TYPE_2 0x02
/* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
-struct genericPartitionMap1
-{
+struct genericPartitionMap1 {
uint8_t partitionMapType;
uint8_t partitionMapLength;
__le16 volSeqNum;
@@ -347,16 +327,14 @@ struct genericPartitionMap1
} __attribute__ ((packed));
/* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */
-struct genericPartitionMap2
-{
+struct genericPartitionMap2 {
uint8_t partitionMapType;
- uint8_t partitionMapLength;
+ uint8_t partitionMapLength;
uint8_t partitionIdent[62];
} __attribute__ ((packed));
/* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */
-struct unallocSpaceDesc
-{
+struct unallocSpaceDesc {
tag descTag;
__le32 volDescSeqNum;
__le32 numAllocDescs;
@@ -364,15 +342,13 @@ struct unallocSpaceDesc
} __attribute__ ((packed));
/* Terminating Descriptor (ECMA 167r3 3/10.9) */
-struct terminatingDesc
-{
+struct terminatingDesc {
tag descTag;
uint8_t reserved[496];
} __attribute__ ((packed));
/* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */
-struct logicalVolIntegrityDesc
-{
+struct logicalVolIntegrityDesc {
tag descTag;
timestamp recordingDateAndTime;
__le32 integrityType;
@@ -390,52 +366,45 @@ struct logicalVolIntegrityDesc
#define LVID_INTEGRITY_TYPE_CLOSE 0x00000001
/* Recorded Address (ECMA 167r3 4/7.1) */
-typedef struct
-{
+typedef struct {
__le32 logicalBlockNum;
__le16 partitionReferenceNum;
} __attribute__ ((packed)) lb_addr;
/* ... and its in-core analog */
-typedef struct
-{
+typedef struct {
uint32_t logicalBlockNum;
uint16_t partitionReferenceNum;
} kernel_lb_addr;
/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
-typedef struct
-{
+typedef struct {
__le32 extLength;
__le32 extPosition;
} __attribute__ ((packed)) short_ad;
/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
-typedef struct
-{
+typedef struct {
__le32 extLength;
lb_addr extLocation;
uint8_t impUse[6];
} __attribute__ ((packed)) long_ad;
-typedef struct
-{
+typedef struct {
uint32_t extLength;
kernel_lb_addr extLocation;
uint8_t impUse[6];
} kernel_long_ad;
/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
-typedef struct
-{
+typedef struct {
__le32 extLength;
__le32 recordedLength;
__le32 informationLength;
lb_addr extLocation;
} __attribute__ ((packed)) ext_ad;
-typedef struct
-{
+typedef struct {
uint32_t extLength;
uint32_t recordedLength;
uint32_t informationLength;
@@ -458,8 +427,7 @@ typedef struct
#define TAG_IDENT_EFE 0x010A
/* File Set Descriptor (ECMA 167r3 4/14.1) */
-struct fileSetDesc
-{
+struct fileSetDesc {
tag descTag;
timestamp recordingDateAndTime;
__le16 interchangeLvl;
@@ -482,8 +450,7 @@ struct fileSetDesc
} __attribute__ ((packed));
/* Partition Header Descriptor (ECMA 167r3 4/14.3) */
-struct partitionHeaderDesc
-{
+struct partitionHeaderDesc {
short_ad unallocSpaceTable;
short_ad unallocSpaceBitmap;
short_ad partitionIntegrityTable;
@@ -493,8 +460,7 @@ struct partitionHeaderDesc
} __attribute__ ((packed));
/* File Identifier Descriptor (ECMA 167r3 4/14.4) */
-struct fileIdentDesc
-{
+struct fileIdentDesc {
tag descTag;
__le16 fileVersionNum;
uint8_t fileCharacteristics;
@@ -514,16 +480,14 @@ struct fileIdentDesc
#define FID_FILE_CHAR_METADATA 0x10
/* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */
-struct allocExtDesc
-{
+struct allocExtDesc {
tag descTag;
__le32 previousAllocExtLocation;
__le32 lengthAllocDescs;
} __attribute__ ((packed));
/* ICB Tag (ECMA 167r3 4/14.6) */
-typedef struct
-{
+typedef struct {
__le32 priorRecordedNumDirectEntries;
__le16 strategyType;
__le16 strategyParameter;
@@ -576,23 +540,20 @@ typedef struct
#define ICBTAG_FLAG_STREAM 0x2000
/* Indirect Entry (ECMA 167r3 4/14.7) */
-struct indirectEntry
-{
+struct indirectEntry {
tag descTag;
icbtag icbTag;
long_ad indirectICB;
} __attribute__ ((packed));
/* Terminal Entry (ECMA 167r3 4/14.8) */
-struct terminalEntry
-{
+struct terminalEntry {
tag descTag;
icbtag icbTag;
} __attribute__ ((packed));
/* File Entry (ECMA 167r3 4/14.9) */
-struct fileEntry
-{
+struct fileEntry {
tag descTag;
icbtag icbTag;
__le32 uid;
@@ -655,16 +616,14 @@ struct fileEntry
#define FE_RECORD_DISPLAY_ATTR_3 0x03
/* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */
-struct extendedAttrHeaderDesc
-{
+struct extendedAttrHeaderDesc {
tag descTag;
__le32 impAttrLocation;
__le32 appAttrLocation;
} __attribute__ ((packed));
/* Generic Format (ECMA 167r3 4/14.10.2) */
-struct genericFormat
-{
+struct genericFormat {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -673,8 +632,7 @@ struct genericFormat
} __attribute__ ((packed));
/* Character Set Information (ECMA 167r3 4/14.10.3) */
-struct charSetInfo
-{
+struct charSetInfo {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -685,8 +643,7 @@ struct charSetInfo
} __attribute__ ((packed));
/* Alternate Permissions (ECMA 167r3 4/14.10.4) */
-struct altPerms
-{
+struct altPerms {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -697,8 +654,7 @@ struct altPerms
} __attribute__ ((packed));
/* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */
-struct fileTimesExtAttr
-{
+struct fileTimesExtAttr {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -715,8 +671,7 @@ struct fileTimesExtAttr
#define FTE_BACKUP 0x00000002
/* Information Times Extended Attribute (ECMA 167r3 4/14.10.6) */
-struct infoTimesExtAttr
-{
+struct infoTimesExtAttr {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -727,8 +682,7 @@ struct infoTimesExtAttr
} __attribute__ ((packed));
/* Device Specification (ECMA 167r3 4/14.10.7) */
-struct deviceSpec
-{
+struct deviceSpec {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -740,8 +694,7 @@ struct deviceSpec
} __attribute__ ((packed));
/* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
-struct impUseExtAttr
-{
+struct impUseExtAttr {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -752,8 +705,7 @@ struct impUseExtAttr
} __attribute__ ((packed));
/* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
-struct appUseExtAttr
-{
+struct appUseExtAttr {
__le32 attrType;
uint8_t attrSubtype;
uint8_t reserved[3];
@@ -771,10 +723,8 @@ struct appUseExtAttr
#define EXTATTR_IMP_USE 2048
#define EXTATTR_APP_USE 65536
-
/* Unallocated Space Entry (ECMA 167r3 4/14.11) */
-struct unallocSpaceEntry
-{
+struct unallocSpaceEntry {
tag descTag;
icbtag icbTag;
__le32 lengthAllocDescs;
@@ -782,8 +732,7 @@ struct unallocSpaceEntry
} __attribute__ ((packed));
/* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
-struct spaceBitmapDesc
-{
+struct spaceBitmapDesc {
tag descTag;
__le32 numOfBits;
__le32 numOfBytes;
@@ -791,8 +740,7 @@ struct spaceBitmapDesc
} __attribute__ ((packed));
/* Partition Integrity Entry (ECMA 167r3 4/14.13) */
-struct partitionIntegrityEntry
-{
+struct partitionIntegrityEntry {
tag descTag;
icbtag icbTag;
timestamp recordingDateAndTime;
@@ -815,15 +763,13 @@ struct partitionIntegrityEntry
/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
/* Logical Volume Header Descriptor (ECMA 167r3 4/14.15) */
-struct logicalVolHeaderDesc
-{
+struct logicalVolHeaderDesc {
__le64 uniqueID;
uint8_t reserved[24];
} __attribute__ ((packed));
/* Path Component (ECMA 167r3 4/14.16.1) */
-struct pathComponent
-{
+struct pathComponent {
uint8_t componentType;
uint8_t lengthComponentIdent;
__le16 componentFileVersionNum;
@@ -831,8 +777,7 @@ struct pathComponent
} __attribute__ ((packed));
/* File Entry (ECMA 167r3 4/14.17) */
-struct extendedFileEntry
-{
+struct extendedFileEntry {
tag descTag;
icbtag icbTag;
__le32 uid;
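
Editor's note: each on-disk structure in ecma_167.h pairs a packed little-endian layout (lb_addr, short_ad, long_ad, ...) with an in-core "kernel_" analog in host byte order. The driver converts between the two with small helpers; the sketch below is modelled on lelb_to_cpu() in the UDF headers (treat the exact helper name and location as an assumption):

/* Convert the packed on-disk lb_addr to its host-order in-core analog. */
static inline kernel_lb_addr udf_lelb_to_cpu(lb_addr in)
{
	kernel_lb_addr out;

	out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum);
	out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum);
	return out;
}
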
diff --git a/fs/udf/file.c b/fs/udf/file.c
index df070be..5d7a4ea 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -41,7 +41,7 @@
#include "udf_i.h"
#include "udf_sb.h"
-static int udf_adinicb_readpage(struct file *file, struct page * page)
+static int udf_adinicb_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
char *kaddr;
@@ -55,6 +55,7 @@ static int udf_adinicb_readpage(struct file *file, struct page * page)
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
+
return 0;
}
@@ -71,22 +72,25 @@ static int udf_adinicb_writepage(struct page *page, struct writeback_control *wb
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
+
return 0;
}
-static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int udf_adinicb_prepare_write(struct file *file, struct page *page,
+ unsigned offset, unsigned to)
{
kmap(page);
return 0;
}
-static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int udf_adinicb_commit_write(struct file *file, struct page *page,
+ unsigned offset, unsigned to)
{
struct inode *inode = page->mapping->host;
char *kaddr = page_address(page);
memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
- kaddr + offset, to - offset);
+ kaddr + offset, to - offset);
mark_inode_dirty(inode);
SetPageUptodate(page);
kunmap(page);
@@ -97,15 +101,15 @@ static int udf_adinicb_commit_write(struct file *file, struct page *page, unsign
}
const struct address_space_operations udf_adinicb_aops = {
- .readpage = udf_adinicb_readpage,
- .writepage = udf_adinicb_writepage,
- .sync_page = block_sync_page,
- .prepare_write = udf_adinicb_prepare_write,
- .commit_write = udf_adinicb_commit_write,
+ .readpage = udf_adinicb_readpage,
+ .writepage = udf_adinicb_writepage,
+ .sync_page = block_sync_page,
+ .prepare_write = udf_adinicb_prepare_write,
+ .commit_write = udf_adinicb_commit_write,
};
static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t ppos)
+ unsigned long nr_segs, loff_t ppos)
{
ssize_t retval;
struct file *file = iocb->ki_filp;
@@ -113,25 +117,20 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
int err, pos;
size_t count = iocb->ki_left;
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
if (file->f_flags & O_APPEND)
pos = inode->i_size;
else
pos = ppos;
if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
- pos + count))
- {
+ pos + count)) {
udf_expand_file_adinicb(inode, pos + count, &err);
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
udf_debug("udf_expand_adinicb: err=%d\n", err);
return err;
}
- }
- else
- {
+ } else {
if (pos + count > inode->i_size)
UDF_I_LENALLOC(inode) = pos + count;
else
@@ -140,9 +139,9 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
}
retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
-
if (retval > 0)
mark_inode_dirty(inode);
+
return retval;
}
@@ -181,48 +180,42 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
* Written, tested, and released.
*/
int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
+ long old_block, new_block;
int result = -EINVAL;
- if ( file_permission(filp, MAY_READ) != 0 )
- {
+ if (file_permission(filp, MAY_READ) != 0) {
udf_debug("no permission to access inode %lu\n",
- inode->i_ino);
+ inode->i_ino);
return -EPERM;
}
- if ( !arg )
- {
+ if (!arg) {
udf_debug("invalid argument to udf_ioctl\n");
return -EINVAL;
}
- switch (cmd)
- {
- case UDF_GETVOLIDENT:
- return copy_to_user((char __user *)arg,
- UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
- case UDF_RELOCATE_BLOCKS:
- {
- long old, new;
-
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- if (get_user(old, (long __user *)arg)) return -EFAULT;
- if ((result = udf_relocate_blocks(inode->i_sb,
- old, &new)) == 0)
- result = put_user(new, (long __user *)arg);
-
- return result;
- }
- case UDF_GETEASIZE:
- result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
- break;
-
- case UDF_GETEABLOCK:
- result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
- UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
- break;
+ switch (cmd) {
+ case UDF_GETVOLIDENT:
+ return copy_to_user((char __user *)arg,
+ UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
+ case UDF_RELOCATE_BLOCKS:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (get_user(old_block, (long __user *)arg))
+ return -EFAULT;
+ if ((result = udf_relocate_blocks(inode->i_sb,
+ old_block, &new_block)) == 0)
+ result = put_user(new_block, (long __user *)arg);
+ return result;
+ case UDF_GETEASIZE:
+ result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
+ break;
+ case UDF_GETEABLOCK:
+ result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
+ UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
+ break;
}
return result;
@@ -240,10 +233,9 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
* HISTORY
*
*/
-static int udf_release_file(struct inode * inode, struct file * filp)
+static int udf_release_file(struct inode *inode, struct file *filp)
{
- if (filp->f_mode & FMODE_WRITE)
- {
+ if (filp->f_mode & FMODE_WRITE) {
lock_kernel();
udf_discard_prealloc(inode);
unlock_kernel();
@@ -265,5 +257,5 @@ const struct file_operations udf_file_operations = {
};
const struct inode_operations udf_file_inode_operations = {
- .truncate = udf_truncate,
+ .truncate = udf_truncate,
};
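
Editor's note: from user space, the UDF_RELOCATE_BLOCKS case reworked in udf_ioctl() above is driven by passing a single long that carries the old block number in and receives the new one back; the kernel side additionally requires CAP_SYS_ADMIN. A minimal caller sketch — the ioctl constant is taken from linux/udf_fs_i.h of this era (verify against your headers), and fd is assumed to be an open file on the mounted UDF filesystem:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/udf_fs_i.h>	/* UDF_RELOCATE_BLOCKS */

static long udf_relocate_block(int fd, long old_block)
{
	long block = old_block;	/* in: old block number, out: new one */

	if (ioctl(fd, UDF_RELOCATE_BLOCKS, &block) < 0) {
		perror("UDF_RELOCATE_BLOCKS");
		return -1;
	}
	return block;	/* new block number filled in by the kernel */
}
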
diff --git a/fs/udf/fsync.c b/fs/udf/fsync.c
index 6ded93e..b2c472b 100644
--- a/fs/udf/fsync.c
+++ b/fs/udf/fsync.c
@@ -29,9 +29,10 @@ static int udf_fsync_inode(struct inode *, int);
* even pass file to fsync ?
*/
-int udf_fsync_file(struct file * file, struct dentry *dentry, int datasync)
+int udf_fsync_file(struct file *file, struct dentry *dentry, int datasync)
{
struct inode *inode = dentry->d_inode;
+
return udf_fsync_inode(inode, datasync);
}
@@ -45,6 +46,7 @@ static int udf_fsync_inode(struct inode *inode, int datasync)
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
return err;
- err |= udf_sync_inode (inode);
+ err |= udf_sync_inode(inode);
+
return err ? -EIO : 0;
}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 10f3188..636d8f6 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -28,7 +28,7 @@
#include "udf_i.h"
#include "udf_sb.h"
-void udf_free_inode(struct inode * inode)
+void udf_free_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
@@ -58,18 +58,17 @@ void udf_free_inode(struct inode * inode)
udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
}
-struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
+struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
{
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
- struct inode * inode;
+ struct inode *inode;
int block;
uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum;
inode = new_inode(sb);
- if (!inode)
- {
+ if (!inode) {
*err = -ENOMEM;
return NULL;
}
@@ -82,16 +81,14 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
UDF_I_STRAT4096(inode) = 0;
block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
- start, err);
- if (*err)
- {
+ start, err);
+ if (*err) {
iput(inode);
return NULL;
}
mutex_lock(&sbi->s_alloc_mutex);
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
struct logicalVolHeaderDesc *lvhd;
uint64_t uniqueID;
lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
@@ -109,14 +106,13 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
}
inode->i_mode = mode;
inode->i_uid = current->fsuid;
- if (dir->i_mode & S_ISGID)
- {
+ if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
mode |= S_ISGID;
- }
- else
+ } else {
inode->i_gid = current->fsgid;
+ }
UDF_I_LOCATION(inode).logicalBlockNum = block;
UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
@@ -125,19 +121,15 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
UDF_I_LENEATTR(inode) = 0;
UDF_I_LENALLOC(inode) = 0;
UDF_I_USE(inode) = 0;
- if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
- {
+ if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
UDF_I_EFE(inode) = 1;
UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
- }
- else
- {
+ } else {
UDF_I_EFE(inode) = 0;
UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
}
- if (!UDF_I_DATA(inode))
- {
+ if (!UDF_I_DATA(inode)) {
iput(inode);
*err = -ENOMEM;
mutex_unlock(&sbi->s_alloc_mutex);
@@ -155,8 +147,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
mark_inode_dirty(inode);
mutex_unlock(&sbi->s_alloc_mutex);
- if (DQUOT_ALLOC_INODE(inode))
- {
+ if (DQUOT_ALLOC_INODE(inode)) {
DQUOT_DROP(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5b82e48..1652b2c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -51,18 +51,18 @@ static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
- long *, int *);
+ long *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
- kernel_lb_addr, uint32_t);
+ kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
- kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+ kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
- kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+ kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
- kernel_long_ad [EXTENT_MERGE_SIZE], int *);
+ kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
- kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
- struct extent_position *);
+ kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
+ struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
/*
@@ -81,7 +81,7 @@ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
*
* Called at the last iput() if i_nlink is zero.
*/
-void udf_delete_inode(struct inode * inode)
+void udf_delete_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
@@ -97,6 +97,7 @@ void udf_delete_inode(struct inode * inode)
unlock_kernel();
return;
+
no_delete:
clear_inode(inode);
}
@@ -132,26 +133,27 @@ static int udf_readpage(struct file *file, struct page *page)
return block_read_full_page(page, udf_get_block);
}
-static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+static int udf_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
{
return block_prepare_write(page, from, to, udf_get_block);
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
- return generic_block_bmap(mapping,block,udf_get_block);
+ return generic_block_bmap(mapping, block, udf_get_block);
}
const struct address_space_operations udf_aops = {
- .readpage = udf_readpage,
- .writepage = udf_writepage,
- .sync_page = block_sync_page,
- .prepare_write = udf_prepare_write,
- .commit_write = generic_commit_write,
- .bmap = udf_bmap,
+ .readpage = udf_readpage,
+ .writepage = udf_writepage,
+ .sync_page = block_sync_page,
+ .prepare_write = udf_prepare_write,
+ .commit_write = generic_commit_write,
+ .bmap = udf_bmap,
};
-void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
+void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
{
struct page *page;
char *kaddr;
@@ -163,8 +165,7 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
- if (!UDF_I_LENALLOC(inode))
- {
+ if (!UDF_I_LENALLOC(inode)) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
else
@@ -176,19 +177,18 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
page = grab_cache_page(inode->i_mapping, 0);
BUG_ON(!PageLocked(page));
- if (!PageUptodate(page))
- {
+ if (!PageUptodate(page)) {
kaddr = kmap(page);
memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
- PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
+ PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
- UDF_I_LENALLOC(inode));
+ UDF_I_LENALLOC(inode));
flush_dcache_page(page);
SetPageUptodate(page);
kunmap(page);
}
memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
- UDF_I_LENALLOC(inode));
+ UDF_I_LENALLOC(inode));
UDF_I_LENALLOC(inode) = 0;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
@@ -201,7 +201,8 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
mark_inode_dirty(inode);
}
-struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
+struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
+ int *err)
{
int newblock;
struct buffer_head *dbh = NULL;
@@ -220,8 +221,7 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
else
alloctype = ICBTAG_FLAG_AD_LONG;
- if (!inode->i_size)
- {
+ if (!inode->i_size) {
UDF_I_ALLOCTYPE(inode) = alloctype;
mark_inode_dirty(inode);
return NULL;
@@ -229,13 +229,12 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
/* alloc block, and copy data to it */
*block = udf_new_block(inode->i_sb, inode,
- UDF_I_LOCATION(inode).partitionReferenceNum,
- UDF_I_LOCATION(inode).logicalBlockNum, err);
-
+ UDF_I_LOCATION(inode).partitionReferenceNum,
+ UDF_I_LOCATION(inode).logicalBlockNum, err);
if (!(*block))
return NULL;
newblock = udf_get_pblock(inode->i_sb, *block,
- UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+ UDF_I_LOCATION(inode).partitionReferenceNum, 0);
if (!newblock)
return NULL;
dbh = udf_tgetblk(inode->i_sb, newblock);
@@ -251,12 +250,10 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
sfibh.sbh = sfibh.ebh = NULL;
dfibh.soffset = dfibh.eoffset = 0;
dfibh.sbh = dfibh.ebh = dbh;
- while ( (f_pos < size) )
- {
+ while ((f_pos < size)) {
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
- if (!sfi)
- {
+ if (!sfi) {
brelse(dbh);
return NULL;
}
@@ -266,8 +263,7 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
- sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
- {
+ sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) {
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
brelse(dbh);
return NULL;
@@ -292,14 +288,14 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
return dbh;
}
-static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
+static int udf_get_block(struct inode *inode, sector_t block,
+ struct buffer_head *bh_result, int create)
{
int err, new;
struct buffer_head *bh;
unsigned long phys;
- if (!create)
- {
+ if (!create) {
phys = udf_block_map(inode, block);
if (phys)
map_bh(bh_result, inode->i_sb, phys);
@@ -315,10 +311,9 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
if (block < 0)
goto abort_negative;
- if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
- {
- UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
- UDF_I_NEXT_ALLOC_GOAL(inode) ++;
+ if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) {
+ UDF_I_NEXT_ALLOC_BLOCK(inode)++;
+ UDF_I_NEXT_ALLOC_GOAL(inode)++;
}
err = 0;
@@ -332,6 +327,7 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
if (new)
set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, phys);
+
abort:
unlock_kernel();
return err;
@@ -341,20 +337,18 @@ abort_negative:
goto abort;
}
-static struct buffer_head *
-udf_getblk(struct inode *inode, long block, int create, int *err)
+static struct buffer_head *udf_getblk(struct inode *inode, long block,
+ int create, int *err)
{
+ struct buffer_head *bh;
struct buffer_head dummy;
dummy.b_state = 0;
dummy.b_blocknr = -1000;
*err = udf_get_block(inode, block, &dummy, create);
- if (!*err && buffer_mapped(&dummy))
- {
- struct buffer_head *bh;
+ if (!*err && buffer_mapped(&dummy)) {
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
- if (buffer_new(&dummy))
- {
+ if (buffer_new(&dummy)) {
lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
@@ -363,33 +357,36 @@ udf_getblk(struct inode *inode, long block, int create, int *err)
}
return bh;
}
+
return NULL;
}
/* Extend the file by 'blocks' blocks, return the number of extents added */
int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
- kernel_long_ad *last_ext, sector_t blocks)
+ kernel_long_ad * last_ext, sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
- kernel_lb_addr prealloc_loc = {0, 0};
+ kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
if (!blocks && fake)
return 0;
+
/* Round the last extent up to a multiple of block size */
if (last_ext->extLength & (sb->s_blocksize - 1)) {
last_ext->extLength =
(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
- sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
+ sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
UDF_I_LENEXTENTS(inode) =
(UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
- ~(sb->s_blocksize - 1);
+ ~(sb->s_blocksize - 1);
}
+
/* Last extent are just preallocated blocks? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
/* Save the extent so that we can reattach it to the end */
@@ -401,10 +398,11 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
}
+
/* Can we merge with the previous extent? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
- add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
- UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
+ add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength &
+ UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
if (add > blocks)
add = blocks;
blocks -= add;
@@ -413,11 +411,12 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
if (fake) {
udf_add_aext(inode, last_pos, last_ext->extLocation,
- last_ext->extLength, 1);
+ last_ext->extLength, 1);
count++;
- }
- else
+ } else {
udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
+ }
+
/* Managed to do everything necessary? */
if (!blocks)
goto out;
@@ -427,11 +426,12 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
last_ext->extLocation.partitionReferenceNum = 0;
add = (1 << (30-sb->s_blocksize_bits)) - 1;
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
+
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
if (udf_add_aext(inode, last_pos, last_ext->extLocation,
- last_ext->extLength, 1) == -1)
+ last_ext->extLength, 1) == -1)
return -1;
count++;
}
@@ -439,10 +439,11 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
if (udf_add_aext(inode, last_pos, last_ext->extLocation,
- last_ext->extLength, 1) == -1)
+ last_ext->extLength, 1) == -1)
return -1;
count++;
}
+
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
@@ -452,6 +453,7 @@ out:
last_ext->extLength = prealloc_len;
count++;
}
+
/* last_pos should point to the last written extent... */
if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
last_pos->offset -= sizeof(short_ad);
@@ -459,11 +461,12 @@ out:
last_pos->offset -= sizeof(long_ad);
else
return -1;
+
return count;
}
-static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
- int *err, long *phys, int *new)
+static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ int *err, long *phys, int *new)
{
static sector_t last_block;
struct buffer_head *result = NULL;
@@ -487,18 +490,15 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
/* find the extent which contains the block we are looking for.
- alternate between laarr[0] and laarr[1] for locations of the
- current extent, and the previous extent */
- do
- {
- if (prev_epos.bh != cur_epos.bh)
- {
+ alternate between laarr[0] and laarr[1] for locations of the
+ current extent, and the previous extent */
+ do {
+ if (prev_epos.bh != cur_epos.bh) {
brelse(prev_epos.bh);
get_bh(cur_epos.bh);
prev_epos.bh = cur_epos.bh;
}
- if (cur_epos.bh != next_epos.bh)
- {
+ if (cur_epos.bh != next_epos.bh) {
brelse(cur_epos.bh);
get_bh(next_epos.bh);
cur_epos.bh = next_epos.bh;
@@ -523,9 +523,9 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
pgoal = eloc.logicalBlockNum +
((elen + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits);
+ inode->i_sb->s_blocksize_bits);
- count ++;
+ count++;
} while (lbcount + elen <= b_off);
b_off -= lbcount;
@@ -538,15 +538,13 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
/* if the extent is allocated and recorded, return the block
- if the extent is not a multiple of the blocksize, round up */
+ if the extent is not a multiple of the blocksize, round up */
- if (etype == (EXT_RECORDED_ALLOCATED >> 30))
- {
- if (elen & (inode->i_sb->s_blocksize - 1))
- {
+ if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (elen & (inode->i_sb->s_blocksize - 1)) {
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
- ~(inode->i_sb->s_blocksize - 1));
+ ~(inode->i_sb->s_blocksize - 1));
etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
}
brelse(prev_epos.bh);
@@ -559,16 +557,14 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
last_block = block;
/* Are we beyond EOF? */
- if (etype == -1)
- {
+ if (etype == -1) {
int ret;
if (count) {
if (c)
laarr[0] = laarr[1];
startnum = 1;
- }
- else {
+ } else {
/* Create a fake extent when there's not one */
memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
@@ -598,18 +594,16 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
inode->i_sb->s_blocksize;
memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
- count ++;
- endnum ++;
+ count++;
+ endnum++;
}
- endnum = c+1;
+ endnum = c + 1;
lastblock = 1;
- }
- else {
+ } else {
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0, swap it with the previous */
- if (!c && count != 1)
- {
+ if (!c && count != 1) {
laarr[2] = laarr[0];
laarr[0] = laarr[1];
laarr[1] = laarr[2];
@@ -617,37 +611,33 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
}
/* if the current block is located in an extent, read the next extent */
- if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
- {
- laarr[c+1].extLength = (etype << 30) | elen;
- laarr[c+1].extLocation = eloc;
- count ++;
- startnum ++;
- endnum ++;
- }
- else {
+ if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
+ laarr[c + 1].extLength = (etype << 30) | elen;
+ laarr[c + 1].extLocation = eloc;
+ count++;
+ startnum++;
+ endnum++;
+ } else {
lastblock = 1;
}
}
/* if the current extent is not recorded but allocated, get the
- block in the extent corresponding to the requested block */
- if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
+ * block in the extent corresponding to the requested block */
+ if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
- else /* otherwise, allocate a new block */
- {
+ } else { /* otherwise, allocate a new block */
if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
goal = UDF_I_NEXT_ALLOC_GOAL(inode);
- if (!goal)
- {
+ if (!goal) {
if (!(goal = pgoal))
goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
}
if (!(newblocknum = udf_new_block(inode->i_sb, inode,
- UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
- {
+ UDF_I_LOCATION(inode).partitionReferenceNum,
+ goal, err))) {
brelse(prev_epos.bh);
*err = -ENOSPC;
return NULL;
@@ -656,8 +646,8 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
}
/* if the extent the requsted block is located in contains multiple blocks,
- split the extent into at most three extents. blocks prior to requested
- block, requested block, and blocks after requested block */
+ * split the extent into at most three extents. blocks prior to requested
+ * block, requested block, and blocks after requested block */
udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
#ifdef UDF_PREALLOCATE
@@ -669,15 +659,14 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
udf_merge_extents(inode, laarr, &endnum);
/* write back the new extents, inserting new extents if the new number
- of extents is greater than the old number, and deleting extents if
- the new number of extents is less than the old number */
+ * of extents is greater than the old number, and deleting extents if
+ * the new number of extents is less than the old number */
udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
brelse(prev_epos.bh);
if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
- UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
- {
+ UDF_I_LOCATION(inode).partitionReferenceNum, 0))) {
return NULL;
}
*phys = newblock;
@@ -691,49 +680,46 @@ static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
+
return result;
}
-static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+static void udf_split_extents(struct inode *inode, int *c, int offset,
+ int newblocknum,
+ kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ int *endnum)
{
if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
- (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
- {
+ (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
int curr = *c;
int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
int8_t etype = (laarr[curr].extLength >> 30);
- if (blen == 1)
+ if (blen == 1) {
;
- else if (!offset || blen == offset + 1)
- {
- laarr[curr+2] = laarr[curr+1];
- laarr[curr+1] = laarr[curr];
- }
- else
- {
- laarr[curr+3] = laarr[curr+1];
- laarr[curr+2] = laarr[curr+1] = laarr[curr];
+ } else if (!offset || blen == offset + 1) {
+ laarr[curr + 2] = laarr[curr + 1];
+ laarr[curr + 1] = laarr[curr];
+ } else {
+ laarr[curr + 3] = laarr[curr + 1];
+ laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
}
- if (offset)
- {
- if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
- {
+ if (offset) {
+ if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(offset << inode->i_sb->s_blocksize_bits);
laarr[curr].extLocation.logicalBlockNum = 0;
laarr[curr].extLocation.partitionReferenceNum = 0;
- }
- else
+ } else {
laarr[curr].extLength = (etype << 30) |
(offset << inode->i_sb->s_blocksize_bits);
- curr ++;
- (*c) ++;
- (*endnum) ++;
+ }
+ curr++;
+ (*c)++;
+ (*endnum)++;
}
laarr[curr].extLocation.logicalBlockNum = newblocknum;
@@ -742,105 +728,91 @@ static void udf_split_extents(struct inode *inode, int *c, int offset, int newbl
UDF_I_LOCATION(inode).partitionReferenceNum;
laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
inode->i_sb->s_blocksize;
- curr ++;
+ curr++;
- if (blen != offset + 1)
- {
+ if (blen != offset + 1) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
laarr[curr].extLocation.logicalBlockNum += (offset + 1);
laarr[curr].extLength = (etype << 30) |
((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
- curr ++;
- (*endnum) ++;
+ curr++;
+ (*endnum)++;
}
}
}
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+ kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ int *endnum)
{
int start, length = 0, currlength = 0, i;
- if (*endnum >= (c+1))
- {
+ if (*endnum >= (c + 1)) {
if (!lastblock)
return;
else
start = c;
- }
- else
- {
- if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
- {
- start = c+1;
- length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
- }
- else
+ } else {
+ if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+ start = c + 1;
+ length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+ } else {
start = c;
+ }
}
- for (i=start+1; i<=*endnum; i++)
- {
- if (i == *endnum)
- {
+ for (i = start + 1; i <= *endnum; i++) {
+ if (i == *endnum) {
if (lastblock)
length += UDF_DEFAULT_PREALLOC_BLOCKS;
- }
- else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
+ } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
- else
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+ } else {
break;
+ }
}
- if (length)
- {
+ if (length) {
int next = laarr[start].extLocation.logicalBlockNum +
(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
- laarr[start].extLocation.partitionReferenceNum,
- next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
- UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
-
- if (numalloc)
- {
- if (start == (c+1))
+ laarr[start].extLocation.partitionReferenceNum,
+ next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
+ UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
+ if (numalloc) {
+ if (start == (c + 1)) {
laarr[start].extLength +=
(numalloc << inode->i_sb->s_blocksize_bits);
- else
- {
- memmove(&laarr[c+2], &laarr[c+1],
- sizeof(long_ad) * (*endnum - (c+1)));
- (*endnum) ++;
- laarr[c+1].extLocation.logicalBlockNum = next;
- laarr[c+1].extLocation.partitionReferenceNum =
+ } else {
+ memmove(&laarr[c + 2], &laarr[c + 1],
+ sizeof(long_ad) * (*endnum - (c + 1)));
+ (*endnum)++;
+ laarr[c + 1].extLocation.logicalBlockNum = next;
+ laarr[c + 1].extLocation.partitionReferenceNum =
laarr[c].extLocation.partitionReferenceNum;
- laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
+ laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED |
(numalloc << inode->i_sb->s_blocksize_bits);
- start = c+1;
+ start = c + 1;
}
- for (i=start+1; numalloc && i<*endnum; i++)
- {
+ for (i = start + 1; numalloc && i < *endnum; i++) {
int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
- if (elen > numalloc)
- {
+ if (elen > numalloc) {
laarr[i].extLength -=
(numalloc << inode->i_sb->s_blocksize_bits);
numalloc = 0;
- }
- else
- {
+ } else {
numalloc -= elen;
- if (*endnum > (i+1))
- memmove(&laarr[i], &laarr[i+1],
- sizeof(long_ad) * (*endnum - (i+1)));
- i --;
- (*endnum) --;
+ if (*endnum > (i + 1))
+ memmove(&laarr[i], &laarr[i + 1],
+ sizeof(long_ad) * (*endnum - (i + 1)));
+ i--;
+ (*endnum)--;
}
}
UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
@@ -849,82 +821,70 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
}
static void udf_merge_extents(struct inode *inode,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
+ kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ int *endnum)
{
int i;
- for (i=0; i<(*endnum-1); i++)
- {
- if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
- {
+ for (i = 0; i < (*endnum - 1); i++) {
+ if ((laarr[i].extLength >> 30) == (laarr[i + 1].extLength >> 30)) {
if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
- ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
- (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
- {
+ ((laarr[i + 1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
+ (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))) {
if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
- {
- laarr[i+1].extLength = (laarr[i+1].extLength -
- (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
+ (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
+ laarr[i + 1].extLength = (laarr[i + 1].extLength -
+ (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+ UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1);
laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
- laarr[i+1].extLocation.logicalBlockNum =
+ laarr[i + 1].extLocation.logicalBlockNum =
laarr[i].extLocation.logicalBlockNum +
((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
- inode->i_sb->s_blocksize_bits);
- }
- else
- {
- laarr[i].extLength = laarr[i+1].extLength +
+ inode->i_sb->s_blocksize_bits);
+ } else {
+ laarr[i].extLength = laarr[i + 1].extLength +
(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
- if (*endnum > (i+2))
- memmove(&laarr[i+1], &laarr[i+2],
- sizeof(long_ad) * (*endnum - (i+2)));
- i --;
- (*endnum) --;
+ inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1));
+ if (*endnum > (i + 2))
+ memmove(&laarr[i + 1], &laarr[i + 2],
+ sizeof(long_ad) * (*endnum - (i + 2)));
+ i--;
+ (*endnum)--;
}
}
- }
- else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
- ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
- {
+ } else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
+ ((laarr[i + 1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
- ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+ ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
laarr[i].extLocation.logicalBlockNum = 0;
laarr[i].extLocation.partitionReferenceNum = 0;
if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
- {
- laarr[i+1].extLength = (laarr[i+1].extLength -
- (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
+ (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
+ laarr[i + 1].extLength = (laarr[i + 1].extLength -
+ (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+ UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1);
laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
- }
- else
- {
- laarr[i].extLength = laarr[i+1].extLength +
+ } else {
+ laarr[i].extLength = laarr[i + 1].extLength +
(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
- if (*endnum > (i+2))
- memmove(&laarr[i+1], &laarr[i+2],
- sizeof(long_ad) * (*endnum - (i+2)));
- i --;
- (*endnum) --;
+ inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1));
+ if (*endnum > (i + 2))
+ memmove(&laarr[i + 1], &laarr[i + 2],
+ sizeof(long_ad) * (*endnum - (i + 2)));
+ i--;
+ (*endnum)--;
}
- }
- else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
- {
+ } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
- ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
- inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
+ ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
+ inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
laarr[i].extLocation.logicalBlockNum = 0;
laarr[i].extLocation.partitionReferenceNum = 0;
laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
@@ -934,43 +894,39 @@ static void udf_merge_extents(struct inode *inode,
}
static void udf_update_extents(struct inode *inode,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
- struct extent_position *epos)
+ kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ int startnum, int endnum,
+ struct extent_position *epos)
{
int start = 0, i;
kernel_lb_addr tmploc;
uint32_t tmplen;
- if (startnum > endnum)
- {
- for (i=0; i<(startnum-endnum); i++)
+ if (startnum > endnum) {
+ for (i = 0; i < (startnum - endnum); i++)
udf_delete_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
- }
- else if (startnum < endnum)
- {
- for (i=0; i<(endnum-startnum); i++)
- {
+ laarr[i].extLength);
+ } else if (startnum < endnum) {
+ for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
+ laarr[i].extLength);
udf_next_aext(inode, epos, &laarr[i].extLocation,
- &laarr[i].extLength, 1);
- start ++;
+ &laarr[i].extLength, 1);
+ start++;
}
}
- for (i=start; i<endnum; i++)
- {
+ for (i = start; i < endnum; i++) {
udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
udf_write_aext(inode, epos, laarr[i].extLocation,
- laarr[i].extLength, 1);
+ laarr[i].extLength, 1);
}
}
-struct buffer_head * udf_bread(struct inode * inode, int block,
- int create, int * err)
+struct buffer_head *udf_bread(struct inode *inode, int block,
+ int create, int *err)
{
- struct buffer_head * bh = NULL;
+ struct buffer_head *bh = NULL;
bh = udf_getblk(inode, block, create, err);
if (!bh)
@@ -978,65 +934,61 @@ struct buffer_head * udf_bread(struct inode * inode, int block,
if (buffer_uptodate(bh))
return bh;
+
ll_rw_block(READ, 1, &bh);
+
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
+
brelse(bh);
*err = -EIO;
return NULL;
}
-void udf_truncate(struct inode * inode)
+void udf_truncate(struct inode *inode)
{
int offset;
int err;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)))
+ S_ISLNK(inode->i_mode)))
return;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return;
lock_kernel();
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
- inode->i_size))
- {
+ inode->i_size)) {
udf_expand_file_adinicb(inode, inode->i_size, &err);
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
inode->i_size = UDF_I_LENALLOC(inode);
unlock_kernel();
return;
- }
- else
+ } else {
udf_truncate_extents(inode);
- }
- else
- {
+ }
+ } else {
offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
- memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
+ memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00,
+ inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
UDF_I_LENALLOC(inode) = inode->i_size;
}
- }
- else
- {
+ } else {
block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
udf_truncate_extents(inode);
}
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
- udf_sync_inode (inode);
+ udf_sync_inode(inode);
else
mark_inode_dirty(inode);
unlock_kernel();
}
-static void
-__udf_read_inode(struct inode *inode)
+static void __udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
@@ -1055,20 +1007,17 @@ __udf_read_inode(struct inode *inode)
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
-
- if (!bh)
- {
+ if (!bh) {
printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
- inode->i_ino);
+ inode->i_ino);
make_bad_inode(inode);
return;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
- ident != TAG_IDENT_USE)
- {
+ ident != TAG_IDENT_USE) {
printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
- inode->i_ino, ident);
+ inode->i_ino, ident);
brelse(bh);
make_bad_inode(inode);
return;
@@ -1076,51 +1025,43 @@ __udf_read_inode(struct inode *inode)
fe = (struct fileEntry *)bh->b_data;
- if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
- {
+ if (le16_to_cpu(fe->icbTag.strategyType) == 4096) {
struct buffer_head *ibh = NULL, *nbh = NULL;
struct indirectEntry *ie;
ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
- if (ident == TAG_IDENT_IE)
- {
- if (ibh)
- {
+ if (ident == TAG_IDENT_IE) {
+ if (ibh) {
kernel_lb_addr loc;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength &&
- (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
- {
+ (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident))) {
if (ident == TAG_IDENT_FE ||
- ident == TAG_IDENT_EFE)
- {
- memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
+ ident == TAG_IDENT_EFE) {
+ memcpy(&UDF_I_LOCATION(inode), &loc,
+ sizeof(kernel_lb_addr));
brelse(bh);
brelse(ibh);
brelse(nbh);
__udf_read_inode(inode);
return;
- }
- else
- {
+ } else {
brelse(nbh);
brelse(ibh);
}
- }
- else
+ } else {
brelse(ibh);
+ }
}
- }
- else
+ } else {
brelse(ibh);
- }
- else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
- {
+ }
+ } else if (le16_to_cpu(fe->icbTag.strategyType) != 4) {
printk(KERN_ERR "udf: unsupported strategy type: %d\n",
- le16_to_cpu(fe->icbTag.strategyType));
+ le16_to_cpu(fe->icbTag.strategyType));
brelse(bh);
make_bad_inode(inode);
return;
@@ -1153,52 +1094,48 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
UDF_I_LENALLOC(inode) = 0;
UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
- if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
- {
+ if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) {
UDF_I_EFE(inode) = 1;
UDF_I_USE(inode) = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)))
- {
+ if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) {
make_bad_inode(inode);
return;
}
- memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
- }
- else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
- {
+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry),
+ inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+ } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) {
UDF_I_EFE(inode) = 0;
UDF_I_USE(inode) = 0;
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry)))
- {
+ if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) {
make_bad_inode(inode);
return;
}
- memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
- }
- else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
- {
+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry),
+ inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+ } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
UDF_I_EFE(inode) = 0;
UDF_I_USE(inode) = 1;
UDF_I_LENALLOC(inode) =
- le32_to_cpu(
- ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
- if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)))
- {
+ le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
+ if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) {
make_bad_inode(inode);
return;
}
- memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry),
+ inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
return;
}
inode->i_uid = le32_to_cpu(fe->uid);
- if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
- UDF_FLAG_UID_IGNORE))
+ if (inode->i_uid == -1 ||
+ UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
+ UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
inode->i_gid = le32_to_cpu(fe->gid);
- if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
- UDF_FLAG_GID_IGNORE))
+ if (inode->i_gid == -1 ||
+ UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
+ UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
@@ -1211,41 +1148,31 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
- if (UDF_I_EFE(inode) == 0)
- {
+ if (UDF_I_EFE(inode) == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->accessTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(fe->accessTime))) {
inode->i_atime.tv_sec = convtime;
inode->i_atime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
}
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->modificationTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(fe->modificationTime))) {
inode->i_mtime.tv_sec = convtime;
inode->i_mtime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
}
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->attrTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(fe->attrTime))) {
inode->i_ctime.tv_sec = convtime;
inode->i_ctime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
}
@@ -1253,53 +1180,39 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
- }
- else
- {
+ } else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
- (inode->i_sb->s_blocksize_bits - 9);
+ (inode->i_sb->s_blocksize_bits - 9);
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->accessTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(efe->accessTime))) {
inode->i_atime.tv_sec = convtime;
inode->i_atime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
}
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->modificationTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(efe->modificationTime))) {
inode->i_mtime.tv_sec = convtime;
inode->i_mtime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
}
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->createTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(efe->createTime))) {
UDF_I_CRTIME(inode).tv_sec = convtime;
UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
}
- if ( udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->attrTime)) )
- {
+ if (udf_stamp_to_time(&convtime, &convtime_usec,
+ lets_to_cpu(efe->attrTime))) {
inode->i_ctime.tv_sec = convtime;
inode->i_ctime.tv_nsec = convtime_usec * 1000;
- }
- else
- {
+ } else {
inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
}
@@ -1309,79 +1222,55 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
}
- switch (fe->icbTag.fileType)
- {
- case ICBTAG_FILE_TYPE_DIRECTORY:
- {
- inode->i_op = &udf_dir_inode_operations;
- inode->i_fop = &udf_dir_operations;
- inode->i_mode |= S_IFDIR;
- inc_nlink(inode);
- break;
- }
- case ICBTAG_FILE_TYPE_REALTIME:
- case ICBTAG_FILE_TYPE_REGULAR:
- case ICBTAG_FILE_TYPE_UNDEF:
- {
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
- inode->i_data.a_ops = &udf_adinicb_aops;
- else
- inode->i_data.a_ops = &udf_aops;
- inode->i_op = &udf_file_inode_operations;
- inode->i_fop = &udf_file_operations;
- inode->i_mode |= S_IFREG;
- break;
- }
- case ICBTAG_FILE_TYPE_BLOCK:
- {
- inode->i_mode |= S_IFBLK;
- break;
- }
- case ICBTAG_FILE_TYPE_CHAR:
- {
- inode->i_mode |= S_IFCHR;
- break;
- }
- case ICBTAG_FILE_TYPE_FIFO:
- {
- init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
- break;
- }
- case ICBTAG_FILE_TYPE_SOCKET:
- {
- init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
- break;
- }
- case ICBTAG_FILE_TYPE_SYMLINK:
- {
- inode->i_data.a_ops = &udf_symlink_aops;
- inode->i_op = &page_symlink_inode_operations;
- inode->i_mode = S_IFLNK|S_IRWXUGO;
- break;
- }
- default:
- {
- printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
- inode->i_ino, fe->icbTag.fileType);
- make_bad_inode(inode);
- return;
- }
+ switch (fe->icbTag.fileType) {
+ case ICBTAG_FILE_TYPE_DIRECTORY:
+ inode->i_op = &udf_dir_inode_operations;
+ inode->i_fop = &udf_dir_operations;
+ inode->i_mode |= S_IFDIR;
+ inc_nlink(inode);
+ break;
+ case ICBTAG_FILE_TYPE_REALTIME:
+ case ICBTAG_FILE_TYPE_REGULAR:
+ case ICBTAG_FILE_TYPE_UNDEF:
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+ inode->i_data.a_ops = &udf_adinicb_aops;
+ else
+ inode->i_data.a_ops = &udf_aops;
+ inode->i_op = &udf_file_inode_operations;
+ inode->i_fop = &udf_file_operations;
+ inode->i_mode |= S_IFREG;
+ break;
+ case ICBTAG_FILE_TYPE_BLOCK:
+ inode->i_mode |= S_IFBLK;
+ break;
+ case ICBTAG_FILE_TYPE_CHAR:
+ inode->i_mode |= S_IFCHR;
+ break;
+ case ICBTAG_FILE_TYPE_FIFO:
+ init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
+ break;
+ case ICBTAG_FILE_TYPE_SOCKET:
+ init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
+ break;
+ case ICBTAG_FILE_TYPE_SYMLINK:
+ inode->i_data.a_ops = &udf_symlink_aops;
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mode = S_IFLNK | S_IRWXUGO;
+ break;
+ default:
+ printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
+ inode->i_ino, fe->icbTag.fileType);
+ make_bad_inode(inode);
+ return;
}
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- {
- struct deviceSpec *dsea =
- (struct deviceSpec *)
- udf_get_extendedattr(inode, 12, 1);
-
- if (dsea)
- {
- init_special_inode(inode, inode->i_mode, MKDEV(
- le32_to_cpu(dsea->majorDeviceIdent),
- le32_to_cpu(dsea->minorDeviceIdent)));
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
+ if (dsea) {
+ init_special_inode(inode, inode->i_mode,
+ MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
+ le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
- }
- else
- {
+ } else {
make_bad_inode(inode);
}
}
@@ -1391,8 +1280,7 @@ static int udf_alloc_i_data(struct inode *inode, size_t size)
{
UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL);
- if (!UDF_I_DATA(inode))
- {
+ if (!UDF_I_DATA(inode)) {
printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n",
inode->i_ino);
return -ENOMEM;
@@ -1401,8 +1289,7 @@ static int udf_alloc_i_data(struct inode *inode, size_t size)
return 0;
}
-static mode_t
-udf_convert_permissions(struct fileEntry *fe)
+static mode_t udf_convert_permissions(struct fileEntry *fe)
{
mode_t mode;
uint32_t permissions;
@@ -1436,22 +1323,23 @@ udf_convert_permissions(struct fileEntry *fe)
* Written, tested, and released.
*/
-int udf_write_inode(struct inode * inode, int sync)
+int udf_write_inode(struct inode *inode, int sync)
{
int ret;
+
lock_kernel();
ret = udf_update_inode(inode, sync);
unlock_kernel();
+
return ret;
}
-int udf_sync_inode(struct inode * inode)
+int udf_sync_inode(struct inode *inode)
{
return udf_update_inode(inode, 1);
}
-static int
-udf_update_inode(struct inode *inode, int do_sync)
+static int udf_update_inode(struct inode *inode, int do_sync)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
@@ -1463,11 +1351,8 @@ udf_update_inode(struct inode *inode, int do_sync)
kernel_timestamp cpu_time;
int err = 0;
- bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
-
- if (!bh)
- {
+ bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
+ if (!bh) {
udf_debug("bread failure\n");
return -EIO;
}
@@ -1477,23 +1362,23 @@ udf_update_inode(struct inode *inode, int do_sync)
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
- if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
- {
+ if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
struct unallocSpaceEntry *use =
(struct unallocSpaceEntry *)bh->b_data;
use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
- memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
- crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
- sizeof(tag);
+ memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode),
+ inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
+ crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) - sizeof(tag);
use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
use->descTag.descCRCLength = cpu_to_le16(crclen);
use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
use->descTag.tagChecksum = 0;
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++) {
if (i != 4)
use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
+ }
mark_buffer_dirty(bh);
brelse(bh);
@@ -1502,11 +1387,13 @@ udf_update_inode(struct inode *inode, int do_sync)
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
fe->uid = cpu_to_le32(-1);
- else fe->uid = cpu_to_le32(inode->i_uid);
+ else
+ fe->uid = cpu_to_le32(inode->i_uid);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
fe->gid = cpu_to_le32(-1);
- else fe->gid = cpu_to_le32(inode->i_gid);
+ else
+ fe->gid = cpu_to_le32(inode->i_gid);
udfperms = ((inode->i_mode & S_IRWXO) ) |
((inode->i_mode & S_IRWXG) << 2) |
@@ -1525,23 +1412,19 @@ udf_update_inode(struct inode *inode, int do_sync)
fe->informationLength = cpu_to_le64(inode->i_size);
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- {
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
regid *eid;
struct deviceSpec *dsea =
- (struct deviceSpec *)
- udf_get_extendedattr(inode, 12, 1);
-
- if (!dsea)
- {
+ (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
+ if (!dsea) {
dsea = (struct deviceSpec *)
udf_add_extendedattr(inode,
- sizeof(struct deviceSpec) +
- sizeof(regid), 12, 0x3);
+ sizeof(struct deviceSpec) +
+ sizeof(regid), 12, 0x3);
dsea->attrType = cpu_to_le32(12);
dsea->attrSubtype = 1;
dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
- sizeof(regid));
+ sizeof(regid));
dsea->impUseLength = cpu_to_le32(sizeof(regid));
}
eid = (regid *)dsea->impUse;
@@ -1553,9 +1436,9 @@ udf_update_inode(struct inode *inode, int do_sync)
dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
}
- if (UDF_I_EFE(inode) == 0)
- {
- memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+ if (UDF_I_EFE(inode) == 0) {
+ memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode),
+ inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(
(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
(inode->i_sb->s_blocksize_bits - 9));
@@ -1575,31 +1458,27 @@ udf_update_inode(struct inode *inode, int do_sync)
fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
crclen = sizeof(struct fileEntry);
- }
- else
- {
- memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+ } else {
+ memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode),
+ inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
efe->objectSize = cpu_to_le64(inode->i_size);
efe->logicalBlocksRecorded = cpu_to_le64(
(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
(inode->i_sb->s_blocksize_bits - 9));
if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
- (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
- UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
- {
+ (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
+ UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec)) {
UDF_I_CRTIME(inode) = inode->i_atime;
}
if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
- (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
- UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
- {
+ (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
+ UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec)) {
UDF_I_CRTIME(inode) = inode->i_mtime;
}
if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
- (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
- UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
- {
+ (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
+ UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec)) {
UDF_I_CRTIME(inode) = inode->i_ctime;
}
@@ -1622,14 +1501,11 @@ udf_update_inode(struct inode *inode, int do_sync)
efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
crclen = sizeof(struct extendedFileEntry);
}
- if (UDF_I_STRAT4096(inode))
- {
+ if (UDF_I_STRAT4096(inode)) {
fe->icbTag.strategyType = cpu_to_le16(4096);
fe->icbTag.strategyParameter = cpu_to_le16(1);
fe->icbTag.numEntries = cpu_to_le16(2);
- }
- else
- {
+ } else {
fe->icbTag.strategyType = cpu_to_le16(4);
fe->icbTag.numEntries = cpu_to_le16(1);
}
@@ -1669,28 +1545,27 @@ udf_update_inode(struct inode *inode, int do_sync)
fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
fe->descTag.tagChecksum = 0;
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++) {
if (i != 4)
fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
+ }
/* write the data blocks */
mark_buffer_dirty(bh);
- if (do_sync)
- {
+ if (do_sync) {
sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh))
- {
+ if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk("IO error syncing udf inode [%s:%08lx]\n",
- inode->i_sb->s_id, inode->i_ino);
+ inode->i_sb->s_id, inode->i_ino);
err = -EIO;
}
}
brelse(bh);
+
return err;
}
-struct inode *
-udf_iget(struct super_block *sb, kernel_lb_addr ino)
+struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
@@ -1709,7 +1584,7 @@ udf_iget(struct super_block *sb, kernel_lb_addr ino)
if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
udf_debug("block=%d, partition=%d out of range\n",
- ino.logicalBlockNum, ino.partitionReferenceNum);
+ ino.logicalBlockNum, ino.partitionReferenceNum);
make_bad_inode(inode);
goto out_iput;
}
@@ -1721,8 +1596,8 @@ udf_iget(struct super_block *sb, kernel_lb_addr ino)
return NULL;
}
-int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr eloc, uint32_t elen, int inc)
+int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
+ kernel_lb_addr eloc, uint32_t elen, int inc)
{
int adsize;
short_ad *sad = NULL;
@@ -1743,21 +1618,19 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
else
return -1;
- if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
- {
+ if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
char *sptr, *dptr;
struct buffer_head *nbh;
int err, loffset;
kernel_lb_addr obloc = epos->block;
if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
- obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
- {
+ obloc.partitionReferenceNum,
+ obloc.logicalBlockNum, &err))) {
return -1;
}
if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
- epos->block, 0))))
- {
+ epos->block, 0)))) {
return -1;
}
lock_buffer(nbh);
@@ -1769,85 +1642,69 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
aed = (struct allocExtDesc *)(nbh->b_data);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
- if (epos->offset + adsize > inode->i_sb->s_blocksize)
- {
+ if (epos->offset + adsize > inode->i_sb->s_blocksize) {
loffset = epos->offset;
aed->lengthAllocDescs = cpu_to_le32(adsize);
sptr = ptr - adsize;
dptr = nbh->b_data + sizeof(struct allocExtDesc);
memcpy(dptr, sptr, adsize);
epos->offset = sizeof(struct allocExtDesc) + adsize;
- }
- else
- {
+ } else {
loffset = epos->offset + adsize;
aed->lengthAllocDescs = cpu_to_le32(0);
sptr = ptr;
epos->offset = sizeof(struct allocExtDesc);
- if (epos->bh)
- {
+ if (epos->bh) {
aed = (struct allocExtDesc *)epos->bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
- }
- else
- {
+ } else {
UDF_I_LENALLOC(inode) += adsize;
mark_inode_dirty(inode);
}
}
if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
- epos->block.logicalBlockNum, sizeof(tag));
+ epos->block.logicalBlockNum, sizeof(tag));
else
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
- epos->block.logicalBlockNum, sizeof(tag));
- switch (UDF_I_ALLOCTYPE(inode))
- {
- case ICBTAG_FLAG_AD_SHORT:
- {
- sad = (short_ad *)sptr;
- sad->extLength = cpu_to_le32(
- EXT_NEXT_EXTENT_ALLOCDECS |
- inode->i_sb->s_blocksize);
- sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
- break;
- }
- case ICBTAG_FLAG_AD_LONG:
- {
- lad = (long_ad *)sptr;
- lad->extLength = cpu_to_le32(
- EXT_NEXT_EXTENT_ALLOCDECS |
- inode->i_sb->s_blocksize);
- lad->extLocation = cpu_to_lelb(epos->block);
- memset(lad->impUse, 0x00, sizeof(lad->impUse));
- break;
- }
+ epos->block.logicalBlockNum, sizeof(tag));
+ switch (UDF_I_ALLOCTYPE(inode)) {
+ case ICBTAG_FLAG_AD_SHORT:
+ sad = (short_ad *)sptr;
+ sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
+ inode->i_sb->s_blocksize);
+ sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
+ break;
+ case ICBTAG_FLAG_AD_LONG:
+ lad = (long_ad *)sptr;
+ lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
+ inode->i_sb->s_blocksize);
+ lad->extLocation = cpu_to_lelb(epos->block);
+ memset(lad->impUse, 0x00, sizeof(lad->impUse));
+ break;
}
- if (epos->bh)
- {
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+ if (epos->bh) {
+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
udf_update_tag(epos->bh->b_data, loffset);
else
udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos->bh, inode);
brelse(epos->bh);
- }
- else
+ } else {
mark_inode_dirty(inode);
+ }
epos->bh = nbh;
}
etype = udf_write_aext(inode, epos, eloc, elen, inc);
- if (!epos->bh)
- {
+ if (!epos->bh) {
UDF_I_LENALLOC(inode) += adsize;
mark_inode_dirty(inode);
- }
- else
- {
+ } else {
aed = (struct allocExtDesc *)epos->bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
@@ -1861,73 +1718,68 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
return etype;
}
-int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr eloc, uint32_t elen, int inc)
+int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
+ kernel_lb_addr eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
+ short_ad *sad;
+ long_ad *lad;
if (!epos->bh)
ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
else
ptr = epos->bh->b_data + epos->offset;
- switch (UDF_I_ALLOCTYPE(inode))
- {
- case ICBTAG_FLAG_AD_SHORT:
- {
- short_ad *sad = (short_ad *)ptr;
- sad->extLength = cpu_to_le32(elen);
- sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
- adsize = sizeof(short_ad);
- break;
- }
- case ICBTAG_FLAG_AD_LONG:
- {
- long_ad *lad = (long_ad *)ptr;
- lad->extLength = cpu_to_le32(elen);
- lad->extLocation = cpu_to_lelb(eloc);
- memset(lad->impUse, 0x00, sizeof(lad->impUse));
- adsize = sizeof(long_ad);
- break;
- }
- default:
- return -1;
+ switch (UDF_I_ALLOCTYPE(inode)) {
+ case ICBTAG_FLAG_AD_SHORT:
+ sad = (short_ad *)ptr;
+ sad->extLength = cpu_to_le32(elen);
+ sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
+ adsize = sizeof(short_ad);
+ break;
+ case ICBTAG_FLAG_AD_LONG:
+ lad = (long_ad *)ptr;
+ lad->extLength = cpu_to_le32(elen);
+ lad->extLocation = cpu_to_lelb(eloc);
+ memset(lad->impUse, 0x00, sizeof(lad->impUse));
+ adsize = sizeof(long_ad);
+ break;
+ default:
+ return -1;
}
- if (epos->bh)
- {
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
- {
+ if (epos->bh) {
+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(inode->i_sb) >= 0x0201) {
struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
udf_update_tag(epos->bh->b_data,
- le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
+ le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
}
mark_buffer_dirty_inode(epos->bh, inode);
- }
- else
+ } else {
mark_inode_dirty(inode);
+ }
if (inc)
epos->offset += adsize;
+
return (elen >> 30);
}
-int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen, int inc)
+int8_t udf_next_aext(struct inode * inode, struct extent_position * epos,
+ kernel_lb_addr * eloc, uint32_t * elen, int inc)
{
int8_t etype;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
- (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
- {
+ (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
- if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
- {
+ if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0)))) {
udf_debug("reading block %d failed!\n",
- udf_get_lb_pblock(inode->i_sb, epos->block, 0));
+ udf_get_lb_pblock(inode->i_sb, epos->block, 0));
return -1;
}
}
@@ -1935,68 +1787,55 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
return etype;
}
-int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen, int inc)
+int8_t udf_current_aext(struct inode * inode, struct extent_position * epos,
+ kernel_lb_addr * eloc, uint32_t * elen, int inc)
{
int alen;
int8_t etype;
uint8_t *ptr;
+ short_ad *sad;
+ long_ad *lad;
- if (!epos->bh)
- {
+
+ if (!epos->bh) {
if (!epos->offset)
epos->offset = udf_file_entry_alloc_offset(inode);
ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
- }
- else
- {
+ } else {
if (!epos->offset)
epos->offset = sizeof(struct allocExtDesc);
ptr = epos->bh->b_data + epos->offset;
- alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
+ alen = sizeof(struct allocExtDesc) +
+ le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
}
- switch (UDF_I_ALLOCTYPE(inode))
- {
- case ICBTAG_FLAG_AD_SHORT:
- {
- short_ad *sad;
-
- if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
- return -1;
-
- etype = le32_to_cpu(sad->extLength) >> 30;
- eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
- eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
- *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
- break;
- }
- case ICBTAG_FLAG_AD_LONG:
- {
- long_ad *lad;
-
- if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
- return -1;
-
- etype = le32_to_cpu(lad->extLength) >> 30;
- *eloc = lelb_to_cpu(lad->extLocation);
- *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
- break;
- }
- default:
- {
- udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
+ switch (UDF_I_ALLOCTYPE(inode)) {
+ case ICBTAG_FLAG_AD_SHORT:
+ if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
return -1;
- }
+ etype = le32_to_cpu(sad->extLength) >> 30;
+ eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
+ eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
+ *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
+ break;
+ case ICBTAG_FLAG_AD_LONG:
+ if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
+ return -1;
+ etype = le32_to_cpu(lad->extLength) >> 30;
+ *eloc = lelb_to_cpu(lad->extLocation);
+ *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
+ break;
+ default:
+ udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
+ return -1;
}
return etype;
}
-static int8_t
-udf_insert_aext(struct inode *inode, struct extent_position epos,
- kernel_lb_addr neloc, uint32_t nelen)
+static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
+ kernel_lb_addr neloc, uint32_t nelen)
{
kernel_lb_addr oeloc;
uint32_t oelen;
@@ -2005,28 +1844,26 @@ udf_insert_aext(struct inode *inode, struct extent_position epos,
if (epos.bh)
get_bh(epos.bh);
- while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
- {
+ while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
udf_write_aext(inode, &epos, neloc, nelen, 1);
-
neloc = oeloc;
nelen = (etype << 30) | oelen;
}
udf_add_aext(inode, &epos, neloc, nelen, 1);
brelse(epos.bh);
+
return (nelen >> 30);
}
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
- kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
+ kernel_lb_addr eloc, uint32_t elen)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
- if (epos.bh)
- {
+ if (epos.bh) {
get_bh(epos.bh);
get_bh(epos.bh);
}
@@ -2042,11 +1879,9 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
return -1;
- while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
- {
+ while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
- if (oepos.bh != epos.bh)
- {
+ if (oepos.bh != epos.bh) {
oepos.block = epos.block;
brelse(oepos.bh);
get_bh(epos.bh);
@@ -2057,42 +1892,35 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
memset(&eloc, 0x00, sizeof(kernel_lb_addr));
elen = 0;
- if (epos.bh != oepos.bh)
- {
+ if (epos.bh != oepos.bh) {
udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
udf_write_aext(inode, &oepos, eloc, elen, 1);
udf_write_aext(inode, &oepos, eloc, elen, 1);
- if (!oepos.bh)
- {
+ if (!oepos.bh) {
UDF_I_LENALLOC(inode) -= (adsize * 2);
mark_inode_dirty(inode);
- }
- else
- {
+ } else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
- udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
+ cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2 * adsize));
+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+ udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize));
else
udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(oepos.bh, inode);
}
- }
- else
- {
+ } else {
udf_write_aext(inode, &oepos, eloc, elen, 1);
- if (!oepos.bh)
- {
+ if (!oepos.bh) {
UDF_I_LENALLOC(inode) -= adsize;
mark_inode_dirty(inode);
- }
- else
- {
+ } else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
aed->lengthAllocDescs =
cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
else
udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
@@ -2102,17 +1930,19 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
brelse(epos.bh);
brelse(oepos.bh);
+
return (elen >> 30);
}
-int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
- kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
+int8_t inode_bmap(struct inode * inode, sector_t block,
+ struct extent_position * pos, kernel_lb_addr * eloc,
+ uint32_t * elen, sector_t * offset)
{
- loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
+ loff_t lbcount = 0, bcount =
+ (loff_t) block << inode->i_sb->s_blocksize_bits;
int8_t etype;
- if (block < 0)
- {
+ if (block < 0) {
printk(KERN_ERR "udf: inode_bmap: block < 0\n");
return -1;
}
@@ -2122,10 +1952,8 @@ int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *p
pos->bh = NULL;
*elen = 0;
- do
- {
- if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
- {
+ do {
+ if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) {
*offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
UDF_I_LENEXTENTS(inode) = lbcount;
return -1;
@@ -2143,7 +1971,7 @@ long udf_block_map(struct inode *inode, sector_t block)
kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
- struct extent_position epos = { NULL, 0, { 0, 0}};
+ struct extent_position epos = {};
int ret;
lock_kernel();
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 0842161..579bae7 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -26,38 +26,33 @@
#include <linux/udf_fs.h>
#include "udf_sb.h"
-unsigned int
-udf_get_last_session(struct super_block *sb)
+unsigned int udf_get_last_session(struct super_block *sb)
{
struct cdrom_multisession ms_info;
unsigned int vol_desc_start;
struct block_device *bdev = sb->s_bdev;
int i;
- vol_desc_start=0;
- ms_info.addr_format=CDROM_LBA;
- i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
+ vol_desc_start = 0;
+ ms_info.addr_format = CDROM_LBA;
+ i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
#define WE_OBEY_THE_WRITTEN_STANDARDS 1
- if (i == 0)
- {
+ if (i == 0) {
udf_debug("XA disk: %s, vol_desc_start=%d\n",
- (ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
+ (ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
#if WE_OBEY_THE_WRITTEN_STANDARDS
if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
#endif
vol_desc_start = ms_info.addr.lba;
- }
- else
- {
+ } else {
udf_debug("CDROMMULTISESSION not supported: rc=%d\n", i);
}
return vol_desc_start;
}
-unsigned long
-udf_get_last_block(struct super_block *sb)
+unsigned long udf_get_last_block(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
unsigned long lblock = 0;
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a2b2a98..15297de 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -29,8 +29,7 @@
#include "udf_i.h"
#include "udf_sb.h"
-struct buffer_head *
-udf_tgetblk(struct super_block *sb, int block)
+struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_getblk(sb, udf_fixed_to_variable(block));
@@ -38,8 +37,7 @@ udf_tgetblk(struct super_block *sb, int block)
return sb_getblk(sb, block);
}
-struct buffer_head *
-udf_tread(struct super_block *sb, int block)
+struct buffer_head *udf_tread(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_bread(sb, udf_fixed_to_variable(block));
@@ -47,9 +45,8 @@ udf_tread(struct super_block *sb, int block)
return sb_bread(sb, block);
}
-struct genericFormat *
-udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
- uint8_t loc)
+struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
+ uint32_t type, uint8_t loc)
{
uint8_t *ea = NULL, *ad = NULL;
int offset;
@@ -57,10 +54,9 @@ udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
int i;
ea = UDF_I_DATA(inode);
- if (UDF_I_LENEATTR(inode))
+ if (UDF_I_LENEATTR(inode)) {
ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
- else
- {
+ } else {
ad = ea;
size += sizeof(struct extendedAttrHeaderDesc);
}
@@ -70,27 +66,21 @@ udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
/* TODO - Check for FreeEASpace */
- if (loc & 0x01 && offset >= size)
- {
+ if (loc & 0x01 && offset >= size) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
- if (UDF_I_LENALLOC(inode))
- {
+ if (UDF_I_LENALLOC(inode)) {
memmove(&ad[size], ad, UDF_I_LENALLOC(inode));
}
- if (UDF_I_LENEATTR(inode))
- {
+ if (UDF_I_LENEATTR(inode)) {
/* check checksum/crc */
if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
- le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
- {
+ le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) {
return NULL;
}
- }
- else
- {
+ } else {
size -= sizeof(struct extendedAttrHeaderDesc);
UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc);
eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
@@ -105,29 +95,23 @@ udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
}
offset = UDF_I_LENEATTR(inode);
- if (type < 2048)
- {
- if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
- {
+ if (type < 2048) {
+ if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) {
uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
offset -= aal;
eahd->appAttrLocation = cpu_to_le32(aal + size);
}
- if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode))
- {
+ if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode)) {
uint32_t ial = le32_to_cpu(eahd->impAttrLocation);
memmove(&ea[offset - ial + size],
&ea[ial], offset - ial);
offset -= ial;
eahd->impAttrLocation = cpu_to_le32(ial + size);
}
- }
- else if (type < 65536)
- {
- if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode))
- {
+ } else if (type < 65536) {
+ if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) {
uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
@@ -138,22 +122,23 @@ udf_add_extendedattr(struct inode * inode, uint32_t size, uint32_t type,
/* rewrite CRC + checksum of eahd */
crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
eahd->descTag.descCRCLength = cpu_to_le16(crclen);
- eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + sizeof(tag), crclen, 0));
+ eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd +
+ sizeof(tag), crclen, 0));
eahd->descTag.tagChecksum = 0;
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
if (i != 4)
eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i];
UDF_I_LENEATTR(inode) += size;
return (struct genericFormat *)&ea[offset];
}
- if (loc & 0x02)
- {
+ if (loc & 0x02) {
}
+
return NULL;
}
-struct genericFormat *
-udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
+struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
+ uint8_t subtype)
{
struct genericFormat *gaf;
uint8_t *ea = NULL;
@@ -161,18 +146,16 @@ udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
ea = UDF_I_DATA(inode);
- if (UDF_I_LENEATTR(inode))
- {
+ if (UDF_I_LENEATTR(inode)) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
/* check checksum/crc */
if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
- le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum)
- {
+ le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) {
return NULL;
}
-
+
if (type < 2048)
offset = sizeof(struct extendedAttrHeaderDesc);
else if (type < 65536)
@@ -180,8 +163,7 @@ udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
else
offset = le32_to_cpu(eahd->appAttrLocation);
- while (offset < UDF_I_LENEATTR(inode))
- {
+ while (offset < UDF_I_LENEATTR(inode)) {
gaf = (struct genericFormat *)&ea[offset];
if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype)
return gaf;
@@ -189,6 +171,7 @@ udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
offset += le32_to_cpu(gaf->attrLength);
}
}
+
return NULL;
}
@@ -202,8 +185,8 @@ udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-struct buffer_head *
-udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint16_t *ident)
+struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
+ uint32_t location, uint16_t * ident)
{
tag *tag_p;
struct buffer_head *bh = NULL;
@@ -215,9 +198,9 @@ udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint1
return NULL;
bh = udf_tread(sb, block + UDF_SB_SESSION(sb));
- if (!bh)
- {
- udf_debug("block=%d, location=%d: read failed\n", block + UDF_SB_SESSION(sb), location);
+ if (!bh) {
+ udf_debug("block=%d, location=%d: read failed\n",
+ block + UDF_SB_SESSION(sb), location);
return NULL;
}
@@ -225,13 +208,12 @@ udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint1
*ident = le16_to_cpu(tag_p->tagIdent);
- if ( location != le32_to_cpu(tag_p->tagLocation) )
- {
+ if (location != le32_to_cpu(tag_p->tagLocation)) {
udf_debug("location mismatch block %u, tag %u != %u\n",
- block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
+ block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
goto error_out;
}
-
+
/* Verify the tag checksum */
checksum = 0U;
for (i = 0; i < 4; i++)
@@ -245,33 +227,32 @@ udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint1
/* Verify the tag version */
if (le16_to_cpu(tag_p->descVersion) != 0x0002U &&
- le16_to_cpu(tag_p->descVersion) != 0x0003U)
- {
+ le16_to_cpu(tag_p->descVersion) != 0x0003U) {
udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n",
- le16_to_cpu(tag_p->descVersion), block);
+ le16_to_cpu(tag_p->descVersion), block);
goto error_out;
}
/* Verify the descriptor CRC */
if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
- le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
- le16_to_cpu(tag_p->descCRCLength), 0))
- {
+ le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
+ le16_to_cpu(tag_p->descCRCLength), 0)) {
return bh;
}
udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
- block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
+ block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC),
+ le16_to_cpu(tag_p->descCRCLength));
error_out:
brelse(bh);
return NULL;
}
-struct buffer_head *
-udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, uint32_t offset, uint16_t *ident)
+struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
+ uint32_t offset, uint16_t * ident)
{
return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
- loc.logicalBlockNum + offset, ident);
+ loc.logicalBlockNum + offset, ident);
}
void udf_update_tag(char *data, int length)
@@ -285,13 +266,13 @@ void udf_update_tag(char *data, int length)
tptr->descCRCLength = cpu_to_le16(length);
tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
if (i != 4)
tptr->tagChecksum += (uint8_t)(data[i]);
}
void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
- uint32_t loc, int length)
+ uint32_t loc, int length)
{
tag *tptr = (tag *)data;
tptr->tagIdent = cpu_to_le16(ident);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 51fe307..bec96a6 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -32,16 +32,18 @@
#include <linux/buffer_head.h>
#include <linux/sched.h>
-static inline int udf_match(int len1, const char *name1, int len2, const char *name2)
+static inline int udf_match(int len1, const char *name1, int len2,
+ const char *name2)
{
if (len1 != len2)
return 0;
+
return !memcmp(name1, name2, len1);
}
int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
- struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
- uint8_t *impuse, uint8_t *fileident)
+ struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
+ uint8_t * impuse, uint8_t * fileident)
{
uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag);
uint16_t crc;
@@ -59,14 +61,12 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
offset = fibh->soffset + sizeof(struct fileIdentDesc);
- if (impuse)
- {
- if (adinicb || (offset + liu < 0))
+ if (impuse) {
+ if (adinicb || (offset + liu < 0)) {
memcpy((uint8_t *)sfi->impUse, impuse, liu);
- else if (offset >= 0)
+ } else if (offset >= 0) {
memcpy(fibh->ebh->b_data + offset, impuse, liu);
- else
- {
+ } else {
memcpy((uint8_t *)sfi->impUse, impuse, -offset);
memcpy(fibh->ebh->b_data, impuse - offset, liu + offset);
}
@@ -74,14 +74,12 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
offset += liu;
- if (fileident)
- {
- if (adinicb || (offset + lfi < 0))
+ if (fileident) {
+ if (adinicb || (offset + lfi < 0)) {
memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
- else if (offset >= 0)
+ } else if (offset >= 0) {
memcpy(fibh->ebh->b_data + offset, fileident, lfi);
- else
- {
+ } else {
memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset);
memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset);
}
@@ -89,53 +87,50 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
offset += lfi;
- if (adinicb || (offset + padlen < 0))
+ if (adinicb || (offset + padlen < 0)) {
memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
- else if (offset >= 0)
+ } else if (offset >= 0) {
memset(fibh->ebh->b_data + offset, 0x00, padlen);
- else
- {
+ } else {
memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
memset(fibh->ebh->b_data, 0x00, padlen + offset);
}
- crc = udf_crc((uint8_t *)cfi + sizeof(tag), sizeof(struct fileIdentDesc) -
- sizeof(tag), 0);
+ crc = udf_crc((uint8_t *)cfi + sizeof(tag),
+ sizeof(struct fileIdentDesc) - sizeof(tag), 0);
- if (fibh->sbh == fibh->ebh)
+ if (fibh->sbh == fibh->ebh) {
crc = udf_crc((uint8_t *)sfi->impUse,
- crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
- else if (sizeof(struct fileIdentDesc) >= -fibh->soffset)
+ crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
+ } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset,
- crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
- else
- {
+ crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
+ } else {
crc = udf_crc((uint8_t *)sfi->impUse,
- -fibh->soffset - sizeof(struct fileIdentDesc), crc);
+ -fibh->soffset - sizeof(struct fileIdentDesc), crc);
crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
}
cfi->descTag.descCRC = cpu_to_le16(crc);
cfi->descTag.descCRCLength = cpu_to_le16(crclen);
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++) {
if (i != 4)
checksum += ((uint8_t *)&cfi->descTag)[i];
+ }
cfi->descTag.tagChecksum = checksum;
- if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
+ if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) {
memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
- else
- {
+ } else {
memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
- sizeof(struct fileIdentDesc) + fibh->soffset);
+ sizeof(struct fileIdentDesc) + fibh->soffset);
}
- if (adinicb)
+ if (adinicb) {
mark_inode_dirty(inode);
- else
- {
+ } else {
if (fibh->sbh != fibh->ebh)
mark_buffer_dirty_inode(fibh->ebh, inode);
mark_buffer_dirty_inode(fibh->sbh, inode);
@@ -143,12 +138,12 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
return 0;
}
-static struct fileIdentDesc *
-udf_find_entry(struct inode *dir, struct dentry *dentry,
- struct udf_fileident_bh *fibh,
- struct fileIdentDesc *cfi)
+static struct fileIdentDesc *udf_find_entry(struct inode *dir,
+ struct dentry *dentry,
+ struct udf_fileident_bh *fibh,
+ struct fileIdentDesc *cfi)
{
- struct fileIdentDesc *fi=NULL;
+ struct fileIdentDesc *fi = NULL;
loff_t f_pos;
int block, flen;
char fname[UDF_NAME_LEN];
@@ -159,46 +154,39 @@ udf_find_entry(struct inode *dir, struct dentry *dentry,
kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
- struct extent_position epos = { NULL, 0, { 0, 0}};
+ struct extent_position epos = {};
size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
f_pos = (udf_ext0_offset(dir) >> 2);
fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
fibh->sbh = fibh->ebh = NULL;
- else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
- &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
- {
+ } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+ &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
- {
+ if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(short_ad);
else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(long_ad);
- }
- else
+ } else {
offset = 0;
+ }
- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
- {
+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
brelse(epos.bh);
return NULL;
}
- }
- else
- {
+ } else {
brelse(epos.bh);
return NULL;
}
- while ( (f_pos < size) )
- {
- fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset);
-
- if (!fi)
- {
+ while ((f_pos < size)) {
+ fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
+ &elen, &offset);
+ if (!fi) {
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
@@ -209,54 +197,48 @@ udf_find_entry(struct inode *dir, struct dentry *dentry,
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
- if (fibh->sbh == fibh->ebh)
- {
+ if (fibh->sbh == fibh->ebh) {
nameptr = fi->fileIdent + liu;
- }
- else
- {
+ } else {
int poffset;	/* Unpadded ending offset */
poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
- if (poffset >= lfi)
+ if (poffset >= lfi) {
nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi);
- else
- {
+ } else {
nameptr = fname;
memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
}
}
- if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
- {
- if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE) )
+ if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+ if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
continue;
}
-
- if ( (cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0 )
- {
- if ( !UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE) )
+
+ if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
+ if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
continue;
}
if (!lfi)
continue;
- if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)))
- {
- if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
- {
+ if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) {
+ if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) {
brelse(epos.bh);
return fi;
}
}
}
+
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
brelse(epos.bh);
+
return NULL;
}
@@ -293,25 +275,27 @@ udf_find_entry(struct inode *dir, struct dentry *dentry,
* Written, tested, and released.
*/
-static struct dentry *
-udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
{
struct inode *inode = NULL;
struct fileIdentDesc cfi;
struct udf_fileident_bh fibh;
- if (dentry->d_name.len > UDF_NAME_LEN-2)
+ if (dentry->d_name.len > UDF_NAME_LEN - 2)
return ERR_PTR(-ENAMETOOLONG);
lock_kernel();
#ifdef UDF_RECOVERY
/* temporary shorthand for specifying files by inode number */
- if (!strncmp(dentry->d_name.name, ".B=", 3) )
- {
- kernel_lb_addr lb = { 0, simple_strtoul(dentry->d_name.name+3, NULL, 0) };
+ if (!strncmp(dentry->d_name.name, ".B=", 3)) {
+ kernel_lb_addr lb = {
+ .logicalBlockNum = 0,
+ .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3,
+ NULL, 0),
+ };
inode = udf_iget(dir->i_sb, lb);
- if (!inode)
- {
+ if (!inode) {
unlock_kernel();
return ERR_PTR(-EACCES);
}
@@ -319,31 +303,30 @@ udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
else
#endif /* UDF_RECOVERY */
- if (udf_find_entry(dir, dentry, &fibh, &cfi))
- {
+ if (udf_find_entry(dir, dentry, &fibh, &cfi)) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
inode = udf_iget(dir->i_sb, lelb_to_cpu(cfi.icb.extLocation));
- if ( !inode )
- {
+ if (!inode) {
unlock_kernel();
return ERR_PTR(-EACCES);
}
}
unlock_kernel();
d_add(dentry, inode);
+
return NULL;
}
-static struct fileIdentDesc *
-udf_add_entry(struct inode *dir, struct dentry *dentry,
- struct udf_fileident_bh *fibh,
- struct fileIdentDesc *cfi, int *err)
+static struct fileIdentDesc *udf_add_entry(struct inode *dir,
+ struct dentry *dentry,
+ struct udf_fileident_bh *fibh,
+ struct fileIdentDesc *cfi, int *err)
{
struct super_block *sb;
- struct fileIdentDesc *fi=NULL;
+ struct fileIdentDesc *fi = NULL;
char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
int namelen;
loff_t f_pos;
@@ -357,50 +340,44 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
- struct extent_position epos = { NULL, 0, { 0, 0 }};
+ struct extent_position epos = {};
sb = dir->i_sb;
- if (dentry)
- {
- if (!dentry->d_name.len)
- {
+ if (dentry) {
+ if (!dentry->d_name.len) {
*err = -EINVAL;
return NULL;
}
-
- if ( !(namelen = udf_put_filename(sb, dentry->d_name.name, name, dentry->d_name.len)))
- {
+ if (!(namelen = udf_put_filename(sb, dentry->d_name.name, name,
+ dentry->d_name.len))) {
*err = -ENAMETOOLONG;
return NULL;
}
- }
- else
+ } else {
namelen = 0;
+ }
nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
f_pos = (udf_ext0_offset(dir) >> 2);
fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
fibh->sbh = fibh->ebh = NULL;
- else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
- &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
- {
+ } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+ &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
- {
+ if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(short_ad);
else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(long_ad);
- }
- else
+ } else {
offset = 0;
+ }
- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
- {
+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
brelse(epos.bh);
*err = -EIO;
return NULL;
@@ -408,21 +385,18 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
block = UDF_I_LOCATION(dir).logicalBlockNum;
- }
- else
- {
+ } else {
block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
fibh->sbh = fibh->ebh = NULL;
fibh->soffset = fibh->eoffset = sb->s_blocksize;
goto add;
}
- while ( (f_pos < size) )
- {
- fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset);
+ while ((f_pos < size)) {
+ fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
+ &elen, &offset);
- if (!fi)
- {
+ if (!fi) {
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
@@ -434,38 +408,33 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
- if (fibh->sbh == fibh->ebh)
+ if (fibh->sbh == fibh->ebh) {
nameptr = fi->fileIdent + liu;
- else
- {
+ } else {
int poffset;	/* Unpadded ending offset */
poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
- if (poffset >= lfi)
+ if (poffset >= lfi) {
nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
- else
- {
+ } else {
nameptr = fname;
memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
}
}
- if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
- {
- if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
- {
+ if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
+ if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) {
brelse(epos.bh);
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
- if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
+ if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
return fi;
- else
- {
+ } else {
*err = -EIO;
return NULL;
}
@@ -476,8 +445,7 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
continue;
if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
- udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name))
- {
+ udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) {
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
@@ -491,8 +459,7 @@ add:
f_pos += nfidlen;
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
- sb->s_blocksize - fibh->eoffset < nfidlen)
- {
+ sb->s_blocksize - fibh->eoffset < nfidlen) {
brelse(epos.bh);
epos.bh = NULL;
fibh->soffset -= udf_ext0_offset(dir);
@@ -514,65 +481,54 @@ add:
epos.offset += sizeof(long_ad);
}
- if (sb->s_blocksize - fibh->eoffset >= nfidlen)
- {
+ if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
- if (fibh->sbh != fibh->ebh)
- {
+ if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
block = UDF_I_LOCATION(dir).logicalBlockNum;
- fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir) + UDF_I_LENEATTR(dir));
- }
- else
- {
+ fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset -
+ udf_ext0_offset(dir) +
+ UDF_I_LENEATTR(dir));
+ } else {
block = eloc.logicalBlockNum + ((elen - 1) >>
- dir->i_sb->s_blocksize_bits);
+ dir->i_sb->s_blocksize_bits);
fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
}
- }
- else
- {
+ } else {
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
- if (fibh->sbh != fibh->ebh)
- {
+ if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
block = eloc.logicalBlockNum + ((elen - 1) >>
- dir->i_sb->s_blocksize_bits);
-
- if (!(fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err)))
- {
+ dir->i_sb->s_blocksize_bits);
+ fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err);
+ if (!fibh->ebh) {
brelse(epos.bh);
brelse(fibh->sbh);
return NULL;
}
- if (!(fibh->soffset))
- {
+ if (!fibh->soffset) {
if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
- (EXT_RECORDED_ALLOCATED >> 30))
- {
+ (EXT_RECORDED_ALLOCATED >> 30)) {
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
+ } else {
+ block++;
}
- else
- block ++;
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
- }
- else
- {
+ } else {
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + sb->s_blocksize + fibh->soffset);
}
@@ -586,17 +542,14 @@ add:
cfi->fileVersionNum = cpu_to_le16(1);
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
- if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
- {
+ if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
brelse(epos.bh);
dir->i_size += nfidlen;
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
UDF_I_LENALLOC(dir) += nfidlen;
mark_inode_dirty(dir);
return fi;
- }
- else
- {
+ } else {
brelse(epos.bh);
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
@@ -607,15 +560,19 @@ add:
}
static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
- struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi)
+ struct udf_fileident_bh *fibh,
+ struct fileIdentDesc *cfi)
{
cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
+
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
memset(&(cfi->icb), 0x00, sizeof(long_ad));
+
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
-static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
{
struct udf_fileident_bh fibh;
struct inode *inode;
@@ -624,8 +581,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct
lock_kernel();
inode = udf_new_inode(dir, mode, &err);
- if (!inode)
- {
+ if (!inode) {
unlock_kernel();
return err;
}
@@ -639,9 +595,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct
inode->i_mode = mode;
mark_inode_dirty(inode);
- if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
- {
- inode->i_nlink --;
+ if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
+ inode->i_nlink--;
mark_inode_dirty(inode);
iput(inode);
unlock_kernel();
@@ -652,8 +607,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(dir);
}
if (fibh.sbh != fibh.ebh)
@@ -661,12 +615,14 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct
brelse(fibh.sbh);
unlock_kernel();
d_instantiate(dentry, inode);
+
return 0;
}
-static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
{
- struct inode * inode;
+ struct inode *inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
@@ -682,9 +638,8 @@ static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t
inode->i_uid = current->fsuid;
init_special_inode(inode, mode, rdev);
- if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
- {
- inode->i_nlink --;
+ if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
+ inode->i_nlink--;
mark_inode_dirty(inode);
iput(inode);
unlock_kernel();
@@ -695,8 +650,7 @@ static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t
*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(dir);
}
mark_inode_dirty(inode);
@@ -706,21 +660,22 @@ static int udf_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t
brelse(fibh.sbh);
d_instantiate(dentry, inode);
err = 0;
+
out:
unlock_kernel();
return err;
}
-static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- struct inode * inode;
+ struct inode *inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc cfi, *fi;
int err;
lock_kernel();
err = -EMLINK;
- if (dir->i_nlink >= (256<<sizeof(dir->i_nlink))-1)
+ if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
goto out;
err = -EIO;
@@ -730,8 +685,7 @@ static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
- if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err)))
- {
+ if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err))) {
inode->i_nlink--;
mark_inode_dirty(inode);
iput(inode);
@@ -750,8 +704,7 @@ static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
inode->i_mode |= S_ISGID;
mark_inode_dirty(inode);
- if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
- {
+ if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
inode->i_nlink = 0;
mark_inode_dirty(inode);
iput(inode);
@@ -770,6 +723,7 @@ static int udf_mkdir(struct inode * dir, struct dentry * dentry, int mode)
brelse(fibh.ebh);
brelse(fibh.sbh);
err = 0;
+
out:
unlock_kernel();
return err;
@@ -785,47 +739,39 @@ static int empty_dir(struct inode *dir)
kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
- struct extent_position epos = { NULL, 0, { 0, 0}};
+ struct extent_position epos = {};
f_pos = (udf_ext0_offset(dir) >> 2);
fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
fibh.sbh = fibh.ebh = NULL;
- else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
- &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
- {
+ } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+ &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen)
- {
+ if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(short_ad);
else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(long_ad);
- }
- else
+ } else {
offset = 0;
+ }
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
- {
+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
brelse(epos.bh);
return 0;
}
- }
- else
- {
+ } else {
brelse(epos.bh);
return 0;
}
-
- while ( (f_pos < size) )
- {
- fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset);
-
- if (!fi)
- {
+ while ((f_pos < size)) {
+ fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc,
+ &elen, &offset);
+ if (!fi) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -833,8 +779,8 @@ static int empty_dir(struct inode *dir)
return 0;
}
- if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
- {
+ if (cfi.lengthFileIdent &&
+ (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) {
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -842,17 +788,19 @@ static int empty_dir(struct inode *dir)
return 0;
}
}
+
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
+
return 1;
}
-static int udf_rmdir(struct inode * dir, struct dentry * dentry)
+static int udf_rmdir(struct inode *dir, struct dentry *dentry)
{
int retval;
- struct inode * inode = dentry->d_inode;
+ struct inode *inode = dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi, cfi;
kernel_lb_addr tloc;
@@ -875,8 +823,8 @@ static int udf_rmdir(struct inode * dir, struct dentry * dentry)
goto end_rmdir;
if (inode->i_nlink != 2)
udf_warning(inode->i_sb, "udf_rmdir",
- "empty directory has nlink != 2 (%d)",
- inode->i_nlink);
+ "empty directory has nlink != 2 (%d)",
+ inode->i_nlink);
clear_nlink(inode);
inode->i_size = 0;
inode_dec_link_count(dir);
@@ -887,15 +835,16 @@ end_rmdir:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
+
out:
unlock_kernel();
return retval;
}
-static int udf_unlink(struct inode * dir, struct dentry * dentry)
+static int udf_unlink(struct inode *dir, struct dentry *dentry)
{
int retval;
- struct inode * inode = dentry->d_inode;
+ struct inode *inode = dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
@@ -912,10 +861,9 @@ static int udf_unlink(struct inode * dir, struct dentry * dentry)
if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
goto end_unlink;
- if (!inode->i_nlink)
- {
+ if (!inode->i_nlink) {
udf_debug("Deleting nonexistent file (%lu), %d\n",
- inode->i_ino, inode->i_nlink);
+ inode->i_ino, inode->i_nlink);
inode->i_nlink = 1;
}
retval = udf_delete_entry(dir, fi, &fibh, &cfi);
@@ -931,18 +879,20 @@ end_unlink:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
+
out:
unlock_kernel();
return retval;
}
-static int udf_symlink(struct inode * dir, struct dentry * dentry, const char * symname)
+static int udf_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
{
- struct inode * inode;
+ struct inode *inode;
struct pathComponent *pc;
char *compstart;
struct udf_fileident_bh fibh;
- struct extent_position epos = { NULL, 0, {0, 0}};
+ struct extent_position epos = {};
int eoffset, elen = 0;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
@@ -960,14 +910,13 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &page_symlink_inode_operations;
- if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB) {
kernel_lb_addr eloc;
uint32_t elen;
block = udf_new_block(inode->i_sb, inode,
- UDF_I_LOCATION(inode).partitionReferenceNum,
- UDF_I_LOCATION(inode).logicalBlockNum, &err);
+ UDF_I_LOCATION(inode).partitionReferenceNum,
+ UDF_I_LOCATION(inode).logicalBlockNum, &err);
if (!block)
goto out_no_entry;
epos.block = UDF_I_LOCATION(inode);
@@ -981,7 +930,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
brelse(epos.bh);
block = udf_get_pblock(inode->i_sb, block,
- UDF_I_LOCATION(inode).partitionReferenceNum, 0);
+ UDF_I_LOCATION(inode).partitionReferenceNum, 0);
epos.bh = udf_tread(inode->i_sb, block);
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
@@ -989,17 +938,15 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
unlock_buffer(epos.bh);
mark_buffer_dirty_inode(epos.bh, inode);
ea = epos.bh->b_data + udf_ext0_offset(inode);
- }
- else
+ } else {
ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
+ }
eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
pc = (struct pathComponent *)ea;
- if (*symname == '/')
- {
- do
- {
+ if (*symname == '/') {
+ do {
symname++;
} while (*symname == '/');
@@ -1012,8 +959,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
err = -ENAMETOOLONG;
- while (*symname)
- {
+ while (*symname) {
if (elen + sizeof(struct pathComponent) > eoffset)
goto out_no_entry;
@@ -1021,25 +967,24 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
compstart = (char *)symname;
- do
- {
+ do {
symname++;
} while (*symname && *symname != '/');
pc->componentType = 5;
pc->lengthComponentIdent = 0;
pc->componentFileVersionNum = 0;
- if (compstart[0] == '.')
- {
- if ((symname-compstart) == 1)
+ if (compstart[0] == '.') {
+ if ((symname - compstart) == 1)
pc->componentType = 4;
- else if ((symname-compstart) == 2 && compstart[1] == '.')
+ else if ((symname - compstart) == 2 && compstart[1] == '.')
pc->componentType = 3;
}
- if (pc->componentType == 5)
- {
- if ( !(namelen = udf_put_filename(inode->i_sb, compstart, name, symname-compstart)))
+ if (pc->componentType == 5) {
+ namelen = udf_put_filename(inode->i_sb, compstart, name,
+ symname - compstart);
+ if (!namelen)
goto out_no_entry;
if (elen + sizeof(struct pathComponent) + namelen > eoffset)
@@ -1052,10 +997,8 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
- if (*symname)
- {
- do
- {
+ if (*symname) {
+ do {
symname++;
} while (*symname == '/');
}
@@ -1071,8 +1014,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
goto out_no_entry;
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
- if (UDF_SB_LVIDBH(inode->i_sb))
- {
+ if (UDF_SB_LVIDBH(inode->i_sb)) {
struct logicalVolHeaderDesc *lvhd;
uint64_t uniqueID;
lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
@@ -1085,8 +1027,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(dir);
}
if (fibh.sbh != fibh.ebh)
@@ -1105,8 +1046,8 @@ out_no_entry:
goto out;
}
-static int udf_link(struct dentry * old_dentry, struct inode * dir,
- struct dentry *dentry)
+static int udf_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
struct udf_fileident_bh fibh;
@@ -1114,21 +1055,18 @@ static int udf_link(struct dentry * old_dentry, struct inode * dir,
int err;
lock_kernel();
- if (inode->i_nlink >= (256<<sizeof(inode->i_nlink))-1)
- {
+ if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
unlock_kernel();
return -EMLINK;
}
- if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err)))
- {
+ if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) {
unlock_kernel();
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
- if (UDF_SB_LVIDBH(inode->i_sb))
- {
+ if (UDF_SB_LVIDBH(inode->i_sb)) {
struct logicalVolHeaderDesc *lvhd;
uint64_t uniqueID;
lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
@@ -1141,10 +1079,10 @@ static int udf_link(struct dentry * old_dentry, struct inode * dir,
mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb));
}
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(dir);
}
+
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
@@ -1154,17 +1092,18 @@ static int udf_link(struct dentry * old_dentry, struct inode * dir,
atomic_inc(&inode->i_count);
d_instantiate(dentry, inode);
unlock_kernel();
+
return 0;
}
/* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
-static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
- struct inode * new_dir, struct dentry * new_dentry)
+static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
- struct inode * old_inode = old_dentry->d_inode;
- struct inode * new_inode = new_dentry->d_inode;
+ struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
struct udf_fileident_bh ofibh, nfibh;
struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi;
struct buffer_head *dir_bh = NULL;
@@ -1172,49 +1111,41 @@ static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
kernel_lb_addr tloc;
lock_kernel();
- if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi)))
- {
+ if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi))) {
if (ofibh.sbh != ofibh.ebh)
brelse(ofibh.ebh);
brelse(ofibh.sbh);
}
tloc = lelb_to_cpu(ocfi.icb.extLocation);
if (!ofi || udf_get_lb_pblock(old_dir->i_sb, tloc, 0)
- != old_inode->i_ino)
+ != old_inode->i_ino)
goto end_rename;
nfi = udf_find_entry(new_dir, new_dentry, &nfibh, &ncfi);
- if (nfi)
- {
- if (!new_inode)
- {
+ if (nfi) {
+ if (!new_inode) {
if (nfibh.sbh != nfibh.ebh)
brelse(nfibh.ebh);
brelse(nfibh.sbh);
nfi = NULL;
}
}
- if (S_ISDIR(old_inode->i_mode))
- {
+ if (S_ISDIR(old_inode->i_mode)) {
uint32_t offset = udf_ext0_offset(old_inode);
- if (new_inode)
- {
+ if (new_inode) {
retval = -ENOTEMPTY;
if (!empty_dir(new_inode))
goto end_rename;
}
retval = -EIO;
- if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) -
- (UDF_I_EFE(old_inode) ?
- sizeof(struct extendedFileEntry) :
- sizeof(struct fileEntry)),
- old_inode->i_sb->s_blocksize, &offset);
- }
- else
- {
+ (UDF_I_EFE(old_inode) ?
+ sizeof(struct extendedFileEntry) :
+ sizeof(struct fileEntry)),
+ old_inode->i_sb->s_blocksize, &offset);
+ } else {
dir_bh = udf_bread(old_inode, 0, 0, &retval);
if (!dir_bh)
goto end_rename;
@@ -1223,16 +1154,14 @@ static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
if (!dir_fi)
goto end_rename;
tloc = lelb_to_cpu(dir_fi->icb.extLocation);
- if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0)
- != old_dir->i_ino)
+ if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) != old_dir->i_ino)
goto end_rename;
retval = -EMLINK;
- if (!new_inode && new_dir->i_nlink >= (256<<sizeof(new_dir->i_nlink))-1)
+ if (!new_inode && new_dir->i_nlink >= (256 << sizeof(new_dir->i_nlink)) - 1)
goto end_rename;
}
- if (!nfi)
- {
+ if (!nfi) {
nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
if (!nfi)
goto end_rename;
@@ -1257,39 +1186,32 @@ static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi);
udf_delete_entry(old_dir, ofi, &ofibh, &ocfi);
- if (new_inode)
- {
+ if (new_inode) {
new_inode->i_ctime = current_fs_time(new_inode->i_sb);
inode_dec_link_count(new_inode);
}
old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
mark_inode_dirty(old_dir);
- if (dir_fi)
- {
+ if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir));
udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) +
- le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
- if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB)
- {
+ le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+ if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
mark_inode_dirty(old_inode);
- }
- else
+ } else {
mark_buffer_dirty_inode(dir_bh, old_inode);
+ }
inode_dec_link_count(old_dir);
- if (new_inode)
- {
+ if (new_inode) {
inode_dec_link_count(new_inode);
- }
- else
- {
+ } else {
inc_nlink(new_dir);
mark_inode_dirty(new_dir);
}
}
- if (ofi)
- {
+ if (ofi) {
if (ofibh.sbh != ofibh.ebh)
brelse(ofibh.ebh);
brelse(ofibh.sbh);
@@ -1299,13 +1221,13 @@ static int udf_rename (struct inode * old_dir, struct dentry * old_dentry,
end_rename:
brelse(dir_bh);
- if (nfi)
- {
+ if (nfi) {
if (nfibh.sbh != nfibh.ebh)
brelse(nfibh.ebh);
brelse(nfibh.sbh);
}
unlock_kernel();
+
return retval;
}
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index e82aae6..65ff479 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -65,30 +65,26 @@
#define IS_DF_HARD_WRITE_PROTECT 0x01
#define IS_DF_SOFT_WRITE_PROTECT 0x02
-struct UDFIdentSuffix
-{
+struct UDFIdentSuffix {
__le16 UDFRevision;
uint8_t OSClass;
uint8_t OSIdentifier;
uint8_t reserved[4];
} __attribute__ ((packed));
-struct impIdentSuffix
-{
+struct impIdentSuffix {
uint8_t OSClass;
uint8_t OSIdentifier;
uint8_t reserved[6];
} __attribute__ ((packed));
-struct appIdentSuffix
-{
+struct appIdentSuffix {
uint8_t impUse[8];
} __attribute__ ((packed));
/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
/* Implementation Use (UDF 2.50 2.2.6.4) */
-struct logicalVolIntegrityDescImpUse
-{
+struct logicalVolIntegrityDescImpUse {
regid impIdent;
__le32 numFiles;
__le32 numDirs;
@@ -100,8 +96,7 @@ struct logicalVolIntegrityDescImpUse
/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
/* Implementation Use (UDF 2.50 2.2.7.2) */
-struct impUseVolDescImpUse
-{
+struct impUseVolDescImpUse {
charspec LVICharset;
dstring logicalVolIdent[128];
dstring LVInfo1[36];
@@ -111,8 +106,7 @@ struct impUseVolDescImpUse
uint8_t impUse[128];
} __attribute__ ((packed));
-struct udfPartitionMap2
-{
+struct udfPartitionMap2 {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
@@ -122,8 +116,7 @@ struct udfPartitionMap2
} __attribute__ ((packed));
/* Virtual Partition Map (UDF 2.50 2.2.8) */
-struct virtualPartitionMap
-{
+struct virtualPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
@@ -134,24 +127,22 @@ struct virtualPartitionMap
} __attribute__ ((packed));
/* Sparable Partition Map (UDF 2.50 2.2.9) */
-struct sparablePartitionMap
-{
- uint8_t partitionMapType;
- uint8_t partitionMapLength;
- uint8_t reserved1[2];
- regid partIdent;
- __le16 volSeqNum;
- __le16 partitionNum;
- __le16 packetLength;
- uint8_t numSparingTables;
- uint8_t reserved2[1];
- __le32 sizeSparingTable;
- __le32 locSparingTable[4];
+struct sparablePartitionMap {
+ uint8_t partitionMapType;
+ uint8_t partitionMapLength;
+ uint8_t reserved1[2];
+ regid partIdent;
+ __le16 volSeqNum;
+ __le16 partitionNum;
+ __le16 packetLength;
+ uint8_t numSparingTables;
+ uint8_t reserved2[1];
+ __le32 sizeSparingTable;
+ __le32 locSparingTable[4];
} __attribute__ ((packed));
/* Metadata Partition Map (UDF 2.4.0 2.2.10) */
-struct metadataPartitionMap
-{
+struct metadataPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
@@ -168,18 +159,16 @@ struct metadataPartitionMap
} __attribute__ ((packed));
/* Virtual Allocation Table (UDF 1.5 2.2.10) */
-struct virtualAllocationTable15
-{
+struct virtualAllocationTable15 {
__le32 VirtualSector[0];
regid vatIdent;
__le32 previousVATICBLoc;
-} __attribute__ ((packed));
+} __attribute__ ((packed));
#define ICBTAG_FILE_TYPE_VAT15 0x00U
/* Virtual Allocation Table (UDF 2.50 2.2.11) */
-struct virtualAllocationTable20
-{
+struct virtualAllocationTable20 {
__le16 lengthHeader;
__le16 lengthImpUse;
dstring logicalVolIdent[128];
@@ -197,14 +186,12 @@ struct virtualAllocationTable20
#define ICBTAG_FILE_TYPE_VAT20 0xF8U
/* Sparing Table (UDF 2.50 2.2.12) */
-struct sparingEntry
-{
+struct sparingEntry {
__le32 origLocation;
__le32 mappedLocation;
} __attribute__ ((packed));
-struct sparingTable
-{
+struct sparingTable {
tag descTag;
regid sparingIdent;
__le16 reallocationTableLen;
@@ -220,8 +207,7 @@ struct sparingTable
#define ICBTAG_FILE_TYPE_BITMAP 0xFC
/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
-struct allocDescImpUse
-{
+struct allocDescImpUse {
__le16 flags;
uint8_t impUse[4];
} __attribute__ ((packed));
@@ -233,15 +219,13 @@ struct allocDescImpUse
/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
-struct freeEaSpace
-{
+struct freeEaSpace {
__le16 headerChecksum;
uint8_t freeEASpace[0];
} __attribute__ ((packed));
/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
-struct DVDCopyrightImpUse
-{
+struct DVDCopyrightImpUse {
__le16 headerChecksum;
uint8_t CGMSInfo;
uint8_t dataType;
@@ -250,8 +234,7 @@ struct DVDCopyrightImpUse
/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
-struct freeAppEASpace
-{
+struct freeAppEASpace {
__le16 headerChecksum;
uint8_t freeEASpace[0];
} __attribute__ ((packed));
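The structures above describe on-disc UDF metadata, which is why they remain __attribute__ ((packed)) with explicit little-endian __le16/__le32 members that are only read through le16_to_cpu()/le32_to_cpu(). A standalone userspace sketch of that pattern follows; the layout mirrors struct sparingEntry, and le32_to_cpu_u() is a hypothetical stand-in for the kernel's le32_to_cpu().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sparing_entry_disk {
	uint32_t origLocation;   /* stored little-endian on disc */
	uint32_t mappedLocation; /* stored little-endian on disc */
} __attribute__((packed));

/* Decode a little-endian 32-bit value regardless of host byte order. */
static uint32_t le32_to_cpu_u(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Raw bytes as they would appear in a sparing table on disc. */
	uint8_t raw[8] = { 0x10, 0x00, 0x00, 0x00,   /* origLocation = 16 */
			   0x20, 0x03, 0x00, 0x00 }; /* mappedLocation = 800 */
	struct sparing_entry_disk e;

	memcpy(&e, raw, sizeof(e));
	printf("orig=%u mapped=%u\n",
	       le32_to_cpu_u(e.origLocation),
	       le32_to_cpu_u(e.mappedLocation));
	return 0;
}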
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 467a261..aaab24c 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -14,7 +14,7 @@
*
* HISTORY
*
- * 12/06/98 blf Created file.
+ * 12/06/98 blf Created file.
*
*/
@@ -28,12 +28,12 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
-inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
+ uint16_t partition, uint32_t offset)
{
- if (partition >= UDF_SB_NUMPARTS(sb))
- {
+ if (partition >= UDF_SB_NUMPARTS(sb)) {
udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
- block, partition, offset);
+ block, partition, offset);
return 0xFFFFFFFF;
}
if (UDF_SB_PARTFUNC(sb, partition))
@@ -42,7 +42,8 @@ inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t
return UDF_SB_PARTROOT(sb, partition) + block + offset;
}
-uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
+ uint16_t partition, uint32_t offset)
{
struct buffer_head *bh = NULL;
uint32_t newblock;
@@ -51,31 +52,26 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t
index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
- if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries)
- {
+ if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) {
udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
- block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
+ block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
return 0xFFFFFFFF;
}
- if (block >= index)
- {
+ if (block >= index) {
block -= index;
newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
index = block % (sb->s_blocksize / sizeof(uint32_t));
- }
- else
- {
+ } else {
newblock = 0;
index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
}
loc = udf_block_map(UDF_SB_VAT(sb), newblock);
- if (!(bh = sb_bread(sb, loc)))
- {
+ if (!(bh = sb_bread(sb, loc))) {
udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
- sb, block, partition, loc, index);
+ sb, block, partition, loc, index);
return 0xFFFFFFFF;
}
@@ -83,50 +79,49 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t
brelse(bh);
- if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition)
- {
+ if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
udf_debug("recursive call to udf_get_pblock!\n");
return 0xFFFFFFFF;
}
- return udf_get_pblock(sb, loc, UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, offset);
+ return udf_get_pblock(sb, loc,
+ UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
+ offset);
}
-inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
+ uint16_t partition, uint32_t offset)
{
return udf_get_pblock_virt15(sb, block, partition, offset);
}
-uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset)
+uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block,
+ uint16_t partition, uint32_t offset)
{
int i;
struct sparingTable *st = NULL;
uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
- for (i=0; i<4; i++)
- {
- if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL)
- {
+ for (i = 0; i < 4; i++) {
+ if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) {
st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
break;
}
}
- if (st)
- {
- for (i=0; i<le16_to_cpu(st->reallocationTableLen); i++)
- {
- if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0)
+ if (st) {
+ for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
+ if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
break;
- else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet)
- {
+ } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
return le32_to_cpu(st->mapEntry[i].mappedLocation) +
((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
- }
- else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet)
+ } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
break;
+ }
}
}
+
return UDF_SB_PARTROOT(sb,partition) + block + offset;
}
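udf_get_pblock_spar15() above maps a block through the sparing table: it aligns the block down to its packet, scans the sorted mapEntry[] array, and redirects to mappedLocation when that packet has been spared. A minimal userspace sketch of that lookup, with illustrative names, the packet length assumed to be a power of two, and the endian conversion omitted:

#include <stdint.h>
#include <stdio.h>

struct sparing_entry {
	uint32_t orig;   /* first block of the defective packet */
	uint32_t mapped; /* first block of its replacement packet */
};

static uint32_t spar_remap(uint32_t block, uint32_t packet_len,
			   const struct sparing_entry *tab, int n)
{
	uint32_t packet = block & ~(packet_len - 1); /* packet-aligned base */
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].orig >= 0xFFFFFFF0)       /* unused slots */
			break;
		if (tab[i].orig == packet)           /* spared: remap it */
			return tab[i].mapped + (block & (packet_len - 1));
		if (tab[i].orig > packet)            /* table is sorted */
			break;
	}
	return block;                                /* no remapping needed */
}

int main(void)
{
	struct sparing_entry tab[] = { { 32, 4096 }, { 0xFFFFFFFF, 0 } };

	printf("%u\n", spar_remap(35, 32, tab, 2));  /* prints 4099 */
	printf("%u\n", spar_remap(70, 32, tab, 2));  /* prints 70 */
	return 0;
}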
@@ -138,18 +133,14 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
uint32_t packet;
int i, j, k, l;
- for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
- {
+ for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
if (old_block > UDF_SB_PARTROOT(sb,i) &&
- old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i))
- {
+ old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) {
sdata = &UDF_SB_TYPESPAR(sb,i);
packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
- for (j=0; j<4; j++)
- {
- if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
- {
+ for (j = 0; j < 4; j++) {
+ if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
break;
}
@@ -158,14 +149,10 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
if (!st)
return 1;
- for (k=0; k<le16_to_cpu(st->reallocationTableLen); k++)
- {
- if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF)
- {
- for (; j<4; j++)
- {
- if (sdata->s_spar_map[j])
- {
+ for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
+ if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
+ for (; j < 4; j++) {
+ if (sdata->s_spar_map[j]) {
st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
st->mapEntry[k].origLocation = cpu_to_le32(packet);
udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
@@ -175,28 +162,23 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
return 0;
- }
- else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet)
- {
+ } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
return 0;
- }
- else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet)
+ } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
break;
+ }
}
- for (l=k; l<le16_to_cpu(st->reallocationTableLen); l++)
- {
- if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF)
- {
- for (; j<4; j++)
- {
- if (sdata->s_spar_map[j])
- {
+
+ for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
+ if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
+ for (; j < 4; j++) {
+ if (sdata->s_spar_map[j]) {
st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
mapEntry = st->mapEntry[l];
mapEntry.origLocation = cpu_to_le32(packet);
- memmove(&st->mapEntry[k+1], &st->mapEntry[k], (l-k)*sizeof(struct sparingEntry));
+ memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry));
st->mapEntry[k] = mapEntry;
udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
mark_buffer_dirty(sdata->s_spar_map[j]);
@@ -207,11 +189,12 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
return 0;
}
}
+
return 1;
- }
+ } /* if old_block */
}
- if (i == UDF_SB_NUMPARTS(sb))
- {
+
+ if (i == UDF_SB_NUMPARTS(sb)) {
/* outside of partitions */
/* for now, fail =) */
return 1;
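udf_relocate_blocks() above keeps the sparing table sorted by origLocation: when a new defective packet is recorded, a free slot (origLocation == 0xFFFFFFFF) is consumed and memmove() shifts the intervening entries up by one position. A standalone sketch of that insertion step, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry {
	uint32_t orig;
	uint32_t mapped;
};

/* Move the free slot at index l down to index k for the new packet. */
static void insert_sorted(struct entry *tab, int k, int l, uint32_t packet)
{
	struct entry e = tab[l];             /* the free slot being consumed */

	e.orig = packet;
	memmove(&tab[k + 1], &tab[k], (l - k) * sizeof(struct entry));
	tab[k] = e;                          /* table stays ordered by orig */
}

int main(void)
{
	struct entry tab[] = {
		{ 32, 1000 }, { 96, 1032 }, { 0xFFFFFFFF, 1064 },
	};
	int i;

	/* New defective packet 64 belongs at index 1; free slot is index 2. */
	insert_sorted(tab, 1, 2, 64);

	for (i = 0; i < 3; i++)
		printf("%u -> %u\n", tab[i].orig, tab[i].mapped);
	return 0;
}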
diff --git a/fs/udf/super.c b/fs/udf/super.c
index d6a504f..c68a6e7 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -38,7 +38,7 @@
* 12/20/98 find the free space bitmap (if it exists)
*/
-#include "udfdecl.h"
+#include "udfdecl.h"
#include <linux/blkdev.h>
#include <linux/slab.h>
@@ -80,13 +80,16 @@ static int udf_remount_fs(struct super_block *, int *, char *);
static int udf_check_valid(struct super_block *, int, int);
static int udf_vrs(struct super_block *sb, int silent);
static int udf_load_partition(struct super_block *, kernel_lb_addr *);
-static int udf_load_logicalvol(struct super_block *, struct buffer_head *, kernel_lb_addr *);
+static int udf_load_logicalvol(struct super_block *, struct buffer_head *,
+ kernel_lb_addr *);
static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
static void udf_find_anchor(struct super_block *);
-static int udf_find_fileset(struct super_block *, kernel_lb_addr *, kernel_lb_addr *);
+static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
+ kernel_lb_addr *);
static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
-static void udf_load_fileset(struct super_block *, struct buffer_head *, kernel_lb_addr *);
-static void udf_load_partdesc(struct super_block *, struct buffer_head *);
+static void udf_load_fileset(struct super_block *, struct buffer_head *,
+ kernel_lb_addr *);
+static int udf_load_partdesc(struct super_block *, struct buffer_head *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
@@ -94,7 +97,8 @@ static int udf_statfs(struct dentry *, struct kstatfs *);
/* UDF filesystem type */
static int udf_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super, mnt);
}
@@ -107,7 +111,7 @@ static struct file_system_type udf_fstype = {
.fs_flags = FS_REQUIRES_DEV,
};
-static struct kmem_cache * udf_inode_cachep;
+static struct kmem_cache *udf_inode_cachep;
static struct inode *udf_alloc_inode(struct super_block *sb)
{
@@ -130,9 +134,9 @@ static void udf_destroy_inode(struct inode *inode)
kmem_cache_free(udf_inode_cachep, UDF_I(inode));
}
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
- struct udf_inode_info *ei = (struct udf_inode_info *) foo;
+ struct udf_inode_info *ei = (struct udf_inode_info *)foo;
ei->i_ext.i_data = NULL;
inode_init_once(&ei->vfs_inode);
@@ -142,10 +146,10 @@ static int init_inodecache(void)
{
udf_inode_cachep = kmem_cache_create("udf_inode_cache",
sizeof(struct udf_inode_info),
- 0, (SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD),
- init_once, NULL);
- if (udf_inode_cachep == NULL)
+ 0, (SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (!udf_inode_cachep)
return -ENOMEM;
return 0;
}
@@ -157,19 +161,18 @@ static void destroy_inodecache(void)
/* Superblock operations */
static const struct super_operations udf_sb_ops = {
- .alloc_inode = udf_alloc_inode,
- .destroy_inode = udf_destroy_inode,
- .write_inode = udf_write_inode,
- .delete_inode = udf_delete_inode,
- .clear_inode = udf_clear_inode,
- .put_super = udf_put_super,
- .write_super = udf_write_super,
- .statfs = udf_statfs,
- .remount_fs = udf_remount_fs,
+ .alloc_inode = udf_alloc_inode,
+ .destroy_inode = udf_destroy_inode,
+ .write_inode = udf_write_inode,
+ .delete_inode = udf_delete_inode,
+ .clear_inode = udf_clear_inode,
+ .put_super = udf_put_super,
+ .write_super = udf_write_super,
+ .statfs = udf_statfs,
+ .remount_fs = udf_remount_fs,
};
-struct udf_options
-{
+struct udf_options {
unsigned char novrs;
unsigned int blocksize;
unsigned int session;
@@ -189,15 +192,19 @@ struct udf_options
static int __init init_udf_fs(void)
{
int err;
+
err = init_inodecache();
if (err)
goto out1;
err = register_filesystem(&udf_fstype);
if (err)
goto out;
+
return 0;
+
out:
destroy_inodecache();
+
out1:
return err;
}
@@ -235,7 +242,7 @@ module_exit(exit_udf_fs)
*
* The remaining are for debugging and disaster recovery:
*
- * novrs Skip volume sequence recognition
+ * novrs Skip volume sequence recognition
*
* The following expect a offset from 0.
*
@@ -275,36 +282,35 @@ enum {
};
static match_table_t tokens = {
- {Opt_novrs, "novrs"},
- {Opt_nostrict, "nostrict"},
- {Opt_bs, "bs=%u"},
- {Opt_unhide, "unhide"},
- {Opt_undelete, "undelete"},
- {Opt_noadinicb, "noadinicb"},
- {Opt_adinicb, "adinicb"},
- {Opt_shortad, "shortad"},
- {Opt_longad, "longad"},
- {Opt_uforget, "uid=forget"},
- {Opt_uignore, "uid=ignore"},
- {Opt_gforget, "gid=forget"},
- {Opt_gignore, "gid=ignore"},
- {Opt_gid, "gid=%u"},
- {Opt_uid, "uid=%u"},
- {Opt_umask, "umask=%o"},
- {Opt_session, "session=%u"},
- {Opt_lastblock, "lastblock=%u"},
- {Opt_anchor, "anchor=%u"},
- {Opt_volume, "volume=%u"},
- {Opt_partition, "partition=%u"},
- {Opt_fileset, "fileset=%u"},
- {Opt_rootdir, "rootdir=%u"},
- {Opt_utf8, "utf8"},
- {Opt_iocharset, "iocharset=%s"},
- {Opt_err, NULL}
+ {Opt_novrs, "novrs"},
+ {Opt_nostrict, "nostrict"},
+ {Opt_bs, "bs=%u"},
+ {Opt_unhide, "unhide"},
+ {Opt_undelete, "undelete"},
+ {Opt_noadinicb, "noadinicb"},
+ {Opt_adinicb, "adinicb"},
+ {Opt_shortad, "shortad"},
+ {Opt_longad, "longad"},
+ {Opt_uforget, "uid=forget"},
+ {Opt_uignore, "uid=ignore"},
+ {Opt_gforget, "gid=forget"},
+ {Opt_gignore, "gid=ignore"},
+ {Opt_gid, "gid=%u"},
+ {Opt_uid, "uid=%u"},
+ {Opt_umask, "umask=%o"},
+ {Opt_session, "session=%u"},
+ {Opt_lastblock, "lastblock=%u"},
+ {Opt_anchor, "anchor=%u"},
+ {Opt_volume, "volume=%u"},
+ {Opt_partition, "partition=%u"},
+ {Opt_fileset, "fileset=%u"},
+ {Opt_rootdir, "rootdir=%u"},
+ {Opt_utf8, "utf8"},
+ {Opt_iocharset, "iocharset=%s"},
+ {Opt_err, NULL}
};
-static int
-udf_parse_options(char *options, struct udf_options *uopt)
+static int udf_parse_options(char *options, struct udf_options *uopt)
{
char *p;
int option;
@@ -323,145 +329,145 @@ udf_parse_options(char *options, struct udf_options *uopt)
if (!options)
return 1;
- while ((p = strsep(&options, ",")) != NULL)
- {
+ while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
- switch (token)
- {
- case Opt_novrs:
- uopt->novrs = 1;
- case Opt_bs:
- if (match_int(&args[0], &option))
- return 0;
- uopt->blocksize = option;
- break;
- case Opt_unhide:
- uopt->flags |= (1 << UDF_FLAG_UNHIDE);
- break;
- case Opt_undelete:
- uopt->flags |= (1 << UDF_FLAG_UNDELETE);
- break;
- case Opt_noadinicb:
- uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
- break;
- case Opt_adinicb:
- uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
- break;
- case Opt_shortad:
- uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
- break;
- case Opt_longad:
- uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return 0;
- uopt->gid = option;
- break;
- case Opt_uid:
- if (match_int(args, &option))
- return 0;
- uopt->uid = option;
- break;
- case Opt_umask:
- if (match_octal(args, &option))
- return 0;
- uopt->umask = option;
- break;
- case Opt_nostrict:
- uopt->flags &= ~(1 << UDF_FLAG_STRICT);
- break;
- case Opt_session:
- if (match_int(args, &option))
- return 0;
- uopt->session = option;
- break;
- case Opt_lastblock:
- if (match_int(args, &option))
- return 0;
- uopt->lastblock = option;
- break;
- case Opt_anchor:
- if (match_int(args, &option))
- return 0;
- uopt->anchor = option;
- break;
- case Opt_volume:
- if (match_int(args, &option))
- return 0;
- uopt->volume = option;
- break;
- case Opt_partition:
- if (match_int(args, &option))
- return 0;
- uopt->partition = option;
- break;
- case Opt_fileset:
- if (match_int(args, &option))
- return 0;
- uopt->fileset = option;
- break;
- case Opt_rootdir:
- if (match_int(args, &option))
- return 0;
- uopt->rootdir = option;
- break;
- case Opt_utf8:
- uopt->flags |= (1 << UDF_FLAG_UTF8);
- break;
+ switch (token) {
+ case Opt_novrs:
+ uopt->novrs = 1;
+ case Opt_bs:
+ if (match_int(&args[0], &option))
+ return 0;
+ uopt->blocksize = option;
+ break;
+ case Opt_unhide:
+ uopt->flags |= (1 << UDF_FLAG_UNHIDE);
+ break;
+ case Opt_undelete:
+ uopt->flags |= (1 << UDF_FLAG_UNDELETE);
+ break;
+ case Opt_noadinicb:
+ uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
+ break;
+ case Opt_adinicb:
+ uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
+ break;
+ case Opt_shortad:
+ uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
+ break;
+ case Opt_longad:
+ uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
+ break;
+ case Opt_gid:
+ if (match_int(args, &option))
+ return 0;
+ uopt->gid = option;
+ uopt->flags |= (1 << UDF_FLAG_GID_SET);
+ break;
+ case Opt_uid:
+ if (match_int(args, &option))
+ return 0;
+ uopt->uid = option;
+ uopt->flags |= (1 << UDF_FLAG_UID_SET);
+ break;
+ case Opt_umask:
+ if (match_octal(args, &option))
+ return 0;
+ uopt->umask = option;
+ break;
+ case Opt_nostrict:
+ uopt->flags &= ~(1 << UDF_FLAG_STRICT);
+ break;
+ case Opt_session:
+ if (match_int(args, &option))
+ return 0;
+ uopt->session = option;
+ break;
+ case Opt_lastblock:
+ if (match_int(args, &option))
+ return 0;
+ uopt->lastblock = option;
+ break;
+ case Opt_anchor:
+ if (match_int(args, &option))
+ return 0;
+ uopt->anchor = option;
+ break;
+ case Opt_volume:
+ if (match_int(args, &option))
+ return 0;
+ uopt->volume = option;
+ break;
+ case Opt_partition:
+ if (match_int(args, &option))
+ return 0;
+ uopt->partition = option;
+ break;
+ case Opt_fileset:
+ if (match_int(args, &option))
+ return 0;
+ uopt->fileset = option;
+ break;
+ case Opt_rootdir:
+ if (match_int(args, &option))
+ return 0;
+ uopt->rootdir = option;
+ break;
+ case Opt_utf8:
+ uopt->flags |= (1 << UDF_FLAG_UTF8);
+ break;
#ifdef CONFIG_UDF_NLS
- case Opt_iocharset:
- uopt->nls_map = load_nls(args[0].from);
- uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
- break;
+ case Opt_iocharset:
+ uopt->nls_map = load_nls(args[0].from);
+ uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+ break;
#endif
- case Opt_uignore:
- uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
- break;
- case Opt_uforget:
- uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
- break;
- case Opt_gignore:
- uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
- break;
- case Opt_gforget:
- uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
- break;
- default:
- printk(KERN_ERR "udf: bad mount option \"%s\" "
- "or missing value\n", p);
+ case Opt_uignore:
+ uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
+ break;
+ case Opt_uforget:
+ uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
+ break;
+ case Opt_gignore:
+ uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
+ break;
+ case Opt_gforget:
+ uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
+ break;
+ default:
+ printk(KERN_ERR "udf: bad mount option \"%s\" "
+ "or missing value\n", p);
return 0;
}
}
return 1;
}
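udf_parse_options() walks the comma-separated mount options with strsep() and dispatches on the token table above via match_token()/match_int() from <linux/parser.h>. A simplified userspace sketch of the same pattern; plain strcmp()/strtoul() stand in for the kernel parser, the option names mirror the table, and everything else is illustrative:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct udf_opts {
	int novrs;
	unsigned int blocksize;
	unsigned int gid;
};

static int parse_options(char *options, struct udf_opts *uopt)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;                /* skip empty fields: "a,,b" */
		if (!strcmp(p, "novrs"))
			uopt->novrs = 1;
		else if (!strncmp(p, "bs=", 3))
			uopt->blocksize = strtoul(p + 3, NULL, 10);
		else if (!strncmp(p, "gid=", 4))
			uopt->gid = strtoul(p + 4, NULL, 10);
		else {
			fprintf(stderr, "bad mount option \"%s\"\n", p);
			return 0;                /* same convention: 0 = error */
		}
	}
	return 1;
}

int main(void)
{
	char opts[] = "novrs,bs=2048,gid=100";
	struct udf_opts u = { 0, 512, 0 };

	if (parse_options(opts, &u))
		printf("novrs=%d bs=%u gid=%u\n", u.novrs, u.blocksize, u.gid);
	return 0;
}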
-void
-udf_write_super(struct super_block *sb)
+void udf_write_super(struct super_block *sb)
{
lock_kernel();
+
if (!(sb->s_flags & MS_RDONLY))
udf_open_lvid(sb);
sb->s_dirt = 0;
+
unlock_kernel();
}
-static int
-udf_remount_fs(struct super_block *sb, int *flags, char *options)
+static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
{
struct udf_options uopt;
- uopt.flags = UDF_SB(sb)->s_flags ;
- uopt.uid = UDF_SB(sb)->s_uid ;
- uopt.gid = UDF_SB(sb)->s_gid ;
- uopt.umask = UDF_SB(sb)->s_umask ;
+ uopt.flags = UDF_SB(sb)->s_flags;
+ uopt.uid = UDF_SB(sb)->s_uid;
+ uopt.gid = UDF_SB(sb)->s_gid;
+ uopt.umask = UDF_SB(sb)->s_umask;
- if ( !udf_parse_options(options, &uopt) )
+ if (!udf_parse_options(options, &uopt))
return -EINVAL;
UDF_SB(sb)->s_flags = uopt.flags;
@@ -512,27 +518,26 @@ udf_remount_fs(struct super_block *sb, int *flags, char *options)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static int
-udf_set_blocksize(struct super_block *sb, int bsize)
+static int udf_set_blocksize(struct super_block *sb, int bsize)
{
if (!sb_min_blocksize(sb, bsize)) {
udf_debug("Bad block size (%d)\n", bsize);
printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
return 0;
}
+
return sb->s_blocksize;
}
-static int
-udf_vrs(struct super_block *sb, int silent)
+static int udf_vrs(struct super_block *sb, int silent)
{
struct volStructDesc *vsd = NULL;
int sector = 32768;
int sectorsize;
struct buffer_head *bh = NULL;
- int iso9660=0;
- int nsr02=0;
- int nsr03=0;
+ int iso9660 = 0;
+ int nsr02 = 0;
+ int nsr03 = 0;
/* Block size must be a multiple of 512 */
if (sb->s_blocksize & 511)
@@ -546,10 +551,9 @@ udf_vrs(struct super_block *sb, int silent)
sector += (UDF_SB_SESSION(sb) << sb->s_blocksize_bits);
udf_debug("Starting at sector %u (%ld byte sectors)\n",
- (sector >> sb->s_blocksize_bits), sb->s_blocksize);
+ (sector >> sb->s_blocksize_bits), sb->s_blocksize);
/* Process the sequence (if applicable) */
- for (;!nsr02 && !nsr03; sector += sectorsize)
- {
+ for (; !nsr02 && !nsr03; sector += sectorsize) {
/* Read a block */
bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
@@ -557,52 +561,45 @@ udf_vrs(struct super_block *sb, int silent)
/* Look for ISO descriptors */
vsd = (struct volStructDesc *)(bh->b_data +
- (sector & (sb->s_blocksize - 1)));
+ (sector & (sb->s_blocksize - 1)));
- if (vsd->stdIdent[0] == 0)
- {
+ if (vsd->stdIdent[0] == 0) {
brelse(bh);
break;
- }
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN))
- {
+ } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
iso9660 = sector;
- switch (vsd->structType)
- {
- case 0:
- udf_debug("ISO9660 Boot Record found\n");
- break;
- case 1:
- udf_debug("ISO9660 Primary Volume Descriptor found\n");
- break;
- case 2:
- udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
- break;
- case 3:
- udf_debug("ISO9660 Volume Partition Descriptor found\n");
- break;
- case 255:
- udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
- break;
- default:
- udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
- break;
+ switch (vsd->structType) {
+ case 0:
+ udf_debug("ISO9660 Boot Record found\n");
+ break;
+ case 1:
+ udf_debug("ISO9660 Primary Volume Descriptor found\n");
+ break;
+ case 2:
+ udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
+ break;
+ case 3:
+ udf_debug("ISO9660 Volume Partition Descriptor found\n");
+ break;
+ case 255:
+ udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
+ break;
+ default:
+ udf_debug("ISO9660 VRS (%u) found\n",
+ vsd->structType);
+ break;
}
- }
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
- {
- }
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN))
- {
+ } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) {
+ } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) {
brelse(bh);
break;
- }
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
- {
+ } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) {
nsr02 = sector;
- }
- else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
- {
+ } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) {
nsr03 = sector;
}
brelse(bh);
@@ -635,8 +632,7 @@ udf_vrs(struct super_block *sb, int silent)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static void
-udf_find_anchor(struct super_block *sb)
+static void udf_find_anchor(struct super_block *sb)
{
int lastblock = UDF_SB_LASTBLOCK(sb);
struct buffer_head *bh = NULL;
@@ -644,8 +640,7 @@ udf_find_anchor(struct super_block *sb)
uint32_t location;
int i;
- if (lastblock)
- {
+ if (lastblock) {
int varlastblock = udf_variable_to_fixed(lastblock);
int last[] = { lastblock, lastblock - 2,
lastblock - 150, lastblock - 152,
@@ -663,74 +658,54 @@ udf_find_anchor(struct super_block *sb)
* however, if the disc isn't closed, it could be 512 */
for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
- if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
- {
+ if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) {
ident = location = 0;
- }
- else
- {
+ } else {
ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
brelse(bh);
}
- if (ident == TAG_IDENT_AVDP)
- {
- if (location == last[i] - UDF_SB_SESSION(sb))
- {
+ if (ident == TAG_IDENT_AVDP) {
+ if (location == last[i] - UDF_SB_SESSION(sb)) {
lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb);
UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb);
- }
- else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb))
- {
+ } else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb)) {
UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb);
UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb);
- }
- else
+ } else {
udf_debug("Anchor found at block %d, location mismatch %d.\n",
- last[i], location);
- }
- else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE)
- {
+ last[i], location);
+ }
+ } else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) {
lastblock = last[i];
UDF_SB_ANCHOR(sb)[3] = 512;
- }
- else
- {
- if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
- {
+ } else {
+ if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256))) {
ident = location = 0;
- }
- else
- {
+ } else {
ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
brelse(bh);
}
-
+
if (ident == TAG_IDENT_AVDP &&
- location == last[i] - 256 - UDF_SB_SESSION(sb))
- {
+ location == last[i] - 256 - UDF_SB_SESSION(sb)) {
lastblock = last[i];
UDF_SB_ANCHOR(sb)[1] = last[i] - 256;
- }
- else
- {
- if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
- {
+ } else {
+ if (last[i] < 312 + UDF_SB_SESSION(sb) ||
+ !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb)))) {
ident = location = 0;
- }
- else
- {
+ } else {
ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
brelse(bh);
}
-
+
if (ident == TAG_IDENT_AVDP &&
- location == udf_variable_to_fixed(last[i]) - 256)
- {
+ location == udf_variable_to_fixed(last[i]) - 256) {
UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
lastblock = udf_variable_to_fixed(last[i]);
UDF_SB_ANCHOR(sb)[1] = lastblock - 256;
@@ -740,11 +715,9 @@ udf_find_anchor(struct super_block *sb)
}
}
- if (!lastblock)
- {
+ if (!lastblock) {
/* We haven't found the lastblock. check 312 */
- if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
- {
+ if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) {
ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
brelse(bh);
@@ -755,19 +728,14 @@ udf_find_anchor(struct super_block *sb)
}
for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
- if (UDF_SB_ANCHOR(sb)[i])
- {
- if (!(bh = udf_read_tagged(sb,
- UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
- {
+ if (UDF_SB_ANCHOR(sb)[i]) {
+ if (!(bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i],
+ UDF_SB_ANCHOR(sb)[i], &ident))) {
UDF_SB_ANCHOR(sb)[i] = 0;
- }
- else
- {
+ } else {
brelse(bh);
- if ((ident != TAG_IDENT_AVDP) && (i ||
- (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE)))
- {
+ if ((ident != TAG_IDENT_AVDP) &&
+ (i || (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE))) {
UDF_SB_ANCHOR(sb)[i] = 0;
}
}
@@ -777,89 +745,78 @@ udf_find_anchor(struct super_block *sb)
UDF_SB_LASTBLOCK(sb) = lastblock;
}
-static int
-udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root)
+static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
long lastblock;
uint16_t ident;
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF)
- {
+ fileset->partitionReferenceNum != 0xFFFF) {
bh = udf_read_ptagged(sb, *fileset, 0, &ident);
- if (!bh)
+ if (!bh) {
return 1;
- else if (ident != TAG_IDENT_FSD)
- {
+ } else if (ident != TAG_IDENT_FSD) {
brelse(bh);
return 1;
}
-
+
}
- if (!bh) /* Search backwards through the partitions */
- {
+ if (!bh) { /* Search backwards through the partitions */
kernel_lb_addr newfileset;
+/* --> cvg: FIXME - is it reasonable? */
return 1;
-
- for (newfileset.partitionReferenceNum=UDF_SB_NUMPARTS(sb)-1;
- (newfileset.partitionReferenceNum != 0xFFFF &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- newfileset.partitionReferenceNum--)
- {
+
+ for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1;
+ (newfileset.partitionReferenceNum != 0xFFFF &&
+ fileset->logicalBlockNum == 0xFFFFFFFF &&
+ fileset->partitionReferenceNum == 0xFFFF);
+ newfileset.partitionReferenceNum--) {
lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum);
newfileset.logicalBlockNum = 0;
- do
- {
+ do {
bh = udf_read_ptagged(sb, newfileset, 0, &ident);
- if (!bh)
- {
- newfileset.logicalBlockNum ++;
+ if (!bh) {
+ newfileset.logicalBlockNum++;
continue;
}
- switch (ident)
+ switch (ident) {
+ case TAG_IDENT_SBD:
{
- case TAG_IDENT_SBD:
- {
- struct spaceBitmapDesc *sp;
- sp = (struct spaceBitmapDesc *)bh->b_data;
- newfileset.logicalBlockNum += 1 +
- ((le32_to_cpu(sp->numOfBytes) + sizeof(struct spaceBitmapDesc) - 1)
- >> sb->s_blocksize_bits);
- brelse(bh);
- break;
- }
- case TAG_IDENT_FSD:
- {
- *fileset = newfileset;
- break;
- }
- default:
- {
- newfileset.logicalBlockNum ++;
- brelse(bh);
- bh = NULL;
- break;
- }
+ struct spaceBitmapDesc *sp;
+ sp = (struct spaceBitmapDesc *)bh->b_data;
+ newfileset.logicalBlockNum += 1 +
+ ((le32_to_cpu(sp->numOfBytes) +
+ sizeof(struct spaceBitmapDesc) - 1)
+ >> sb->s_blocksize_bits);
+ brelse(bh);
+ break;
}
- }
- while (newfileset.logicalBlockNum < lastblock &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
+ case TAG_IDENT_FSD:
+ *fileset = newfileset;
+ break;
+ default:
+ newfileset.logicalBlockNum++;
+ brelse(bh);
+ bh = NULL;
+ break;
+ }
+ } while (newfileset.logicalBlockNum < lastblock &&
+ fileset->logicalBlockNum == 0xFFFFFFFF &&
+ fileset->partitionReferenceNum == 0xFFFF);
}
}
if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF) && bh)
- {
+ fileset->partitionReferenceNum != 0xFFFF) && bh) {
udf_debug("Fileset at block=%d, partition=%d\n",
- fileset->logicalBlockNum, fileset->partitionReferenceNum);
+ fileset->logicalBlockNum,
+ fileset->partitionReferenceNum);
UDF_SB_PARTITION(sb) = fileset->partitionReferenceNum;
udf_load_fileset(sb, bh, root);
@@ -869,8 +826,7 @@ udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr
return 1;
}
-static void
-udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
+static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
{
struct primaryVolDesc *pvoldesc;
time_t recording;
@@ -880,37 +836,34 @@ udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
pvoldesc = (struct primaryVolDesc *)bh->b_data;
- if ( udf_stamp_to_time(&recording, &recording_usec,
- lets_to_cpu(pvoldesc->recordingDateAndTime)) )
- {
+ if (udf_stamp_to_time(&recording, &recording_usec,
+ lets_to_cpu(pvoldesc->recordingDateAndTime))) {
kernel_timestamp ts;
ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n",
- recording, recording_usec,
- ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.typeAndTimezone);
+ recording, recording_usec,
+ ts.year, ts.month, ts.day, ts.hour,
+ ts.minute, ts.typeAndTimezone);
UDF_SB_RECORDTIME(sb).tv_sec = recording;
UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000;
}
- if ( !udf_build_ustr(&instr, pvoldesc->volIdent, 32) )
- {
- if (udf_CS0toUTF8(&outstr, &instr))
- {
- strncpy( UDF_SB_VOLIDENT(sb), outstr.u_name,
+ if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) {
+ if (udf_CS0toUTF8(&outstr, &instr)) {
+ strncpy(UDF_SB_VOLIDENT(sb), outstr.u_name,
outstr.u_len > 31 ? 31 : outstr.u_len);
udf_debug("volIdent[] = '%s'\n", UDF_SB_VOLIDENT(sb));
}
}
- if ( !udf_build_ustr(&instr, pvoldesc->volSetIdent, 128) )
- {
+ if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) {
if (udf_CS0toUTF8(&outstr, &instr))
udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
}
}
-static void
-udf_load_fileset(struct super_block *sb, struct buffer_head *bh, kernel_lb_addr *root)
+static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
+ kernel_lb_addr *root)
{
struct fileSetDesc *fset;
@@ -920,24 +873,21 @@ udf_load_fileset(struct super_block *sb, struct buffer_head *bh, kernel_lb_addr
UDF_SB_SERIALNUM(sb) = le16_to_cpu(fset->descTag.tagSerialNum);
- udf_debug("Rootdir at block=%d, partition=%d\n",
- root->logicalBlockNum, root->partitionReferenceNum);
+ udf_debug("Rootdir at block=%d, partition=%d\n",
+ root->logicalBlockNum, root->partitionReferenceNum);
}
-static void
-udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
{
struct partitionDesc *p;
int i;
p = (struct partitionDesc *)bh->b_data;
- for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
- {
- udf_debug("Searching map: (%d == %d)\n",
- UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber));
- if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber))
- {
+ for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
+ udf_debug("Searching map: (%d == %d)\n",
+ UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber));
+ if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber)) {
UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */
UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation);
if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY)
@@ -950,79 +900,87 @@ udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE;
if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) ||
- !strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
- {
+ !strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) {
struct partitionHeaderDesc *phd;
phd = (struct partitionHeaderDesc *)(p->partitionContentsUse);
- if (phd->unallocSpaceTable.extLength)
- {
- kernel_lb_addr loc = { le32_to_cpu(phd->unallocSpaceTable.extPosition), i };
+ if (phd->unallocSpaceTable.extLength) {
+ kernel_lb_addr loc = {
+ .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
+ .partitionReferenceNum = i,
+ };
UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
udf_iget(sb, loc);
+ if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) {
+ udf_debug("cannot load unallocSpaceTable (part %d)\n",
+ i);
+ return 1;
+ }
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
- i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
+ i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
}
- if (phd->unallocSpaceBitmap.extLength)
- {
+ if (phd->unallocSpaceBitmap.extLength) {
UDF_SB_ALLOC_BITMAP(sb, i, s_uspace);
- if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL)
- {
+ if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL) {
UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength =
le32_to_cpu(phd->unallocSpaceBitmap.extLength);
UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition =
le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP;
udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
- i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition);
+ i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition);
}
}
if (phd->partitionIntegrityTable.extLength)
udf_debug("partitionIntegrityTable (part %d)\n", i);
- if (phd->freedSpaceTable.extLength)
- {
- kernel_lb_addr loc = { le32_to_cpu(phd->freedSpaceTable.extPosition), i };
+ if (phd->freedSpaceTable.extLength) {
+ kernel_lb_addr loc = {
+ .logicalBlockNum = le32_to_cpu(phd->freedSpaceTable.extPosition),
+ .partitionReferenceNum = i,
+ };
UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
udf_iget(sb, loc);
+ if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) {
+ udf_debug("cannot load freedSpaceTable (part %d)\n",
+ i);
+ return 1;
+ }
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
udf_debug("freedSpaceTable (part %d) @ %ld\n",
- i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
+ i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
}
- if (phd->freedSpaceBitmap.extLength)
- {
+ if (phd->freedSpaceBitmap.extLength) {
UDF_SB_ALLOC_BITMAP(sb, i, s_fspace);
- if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL)
- {
+ if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL) {
UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength =
le32_to_cpu(phd->freedSpaceBitmap.extLength);
UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition =
le32_to_cpu(phd->freedSpaceBitmap.extPosition);
UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP;
udf_debug("freedSpaceBitmap (part %d) @ %d\n",
- i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition);
+ i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition);
}
}
}
break;
}
}
- if (i == UDF_SB_NUMPARTS(sb))
- {
- udf_debug("Partition (%d) not found in partition map\n", le16_to_cpu(p->partitionNumber));
- }
- else
- {
+ if (i == UDF_SB_NUMPARTS(sb)) {
+ udf_debug("Partition (%d) not found in partition map\n",
+ le16_to_cpu(p->partitionNumber));
+ } else {
udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n",
- le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
- UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
+ le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
+ UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
}
+ return 0;
}
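Besides reflowing the code, the hunk above changes udf_load_partdesc() to return int so a failed udf_iget() is reported to the caller, and it switches the kernel_lb_addr initializers to C99 designated form. A standalone illustration of designated initializers; the struct mirrors kernel_lb_addr, the rest is made up:

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
} lb_addr;

int main(void)
{
	/* Positional form: silently breaks if the member order changes. */
	lb_addr a = { 128, 1 };

	/* Designated form used by the new code: each field is named. */
	lb_addr b = {
		.logicalBlockNum = 128,
		.partitionReferenceNum = 1,
	};

	printf("a=%u:%u b=%u:%u\n",
	       a.logicalBlockNum, (unsigned)a.partitionReferenceNum,
	       b.logicalBlockNum, (unsigned)b.partitionReferenceNum);
	return 0;
}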
-static int
-udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_addr *fileset)
+static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
+ kernel_lb_addr *fileset)
{
struct logicalVolDesc *lvd;
int i, j, offset;
@@ -1032,37 +990,27 @@ udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_a
UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps));
- for (i=0,offset=0;
- i<UDF_SB_NUMPARTS(sb) && offset<le32_to_cpu(lvd->mapTableLength);
- i++,offset+=((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength)
- {
+ for (i = 0, offset = 0;
+ i < UDF_SB_NUMPARTS(sb) && offset < le32_to_cpu(lvd->mapTableLength);
+ i++, offset += ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength) {
type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType;
- if (type == 1)
- {
+ if (type == 1) {
struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]);
UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15;
UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum);
UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum);
UDF_SB_PARTFUNC(sb,i) = NULL;
- }
- else if (type == 2)
- {
+ } else if (type == 2) {
struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]);
- if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL)))
- {
- if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150)
- {
+ if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) {
+ if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150) {
UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15;
UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15;
- }
- else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200)
- {
+ } else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200) {
UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20;
UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20;
}
- }
- else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE)))
- {
+ } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) {
uint32_t loc;
uint16_t ident;
struct sparingTable *st;
@@ -1070,26 +1018,21 @@ udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_a
UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15;
UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength);
- for (j=0; j<spm->numSparingTables; j++)
- {
+ for (j = 0; j < spm->numSparingTables; j++) {
loc = le32_to_cpu(spm->locSparingTable[j]);
UDF_SB_TYPESPAR(sb,i).s_spar_map[j] =
udf_read_tagged(sb, loc, loc, &ident);
- if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL)
- {
+ if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data;
if (ident != 0 ||
- strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING)))
- {
+ strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING))) {
brelse(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]);
UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL;
}
}
}
UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15;
- }
- else
- {
+ } else {
udf_debug("Unknown ident: %s\n", upm2->partIdent.ident);
continue;
}
@@ -1097,20 +1040,20 @@ udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_a
UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum);
}
udf_debug("Partition (%d:%d) type %d on volume %d\n",
- i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i));
+ i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i));
}
- if (fileset)
- {
+ if (fileset) {
long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
*fileset = lelb_to_cpu(la->extLocation);
udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
- fileset->logicalBlockNum,
- fileset->partitionReferenceNum);
+ fileset->logicalBlockNum,
+ fileset->partitionReferenceNum);
}
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
+
return 0;
}
@@ -1118,26 +1061,24 @@ udf_load_logicalvol(struct super_block *sb, struct buffer_head * bh, kernel_lb_a
* udf_load_logicalvolint
*
*/
-static void
-udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
+static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
{
struct buffer_head *bh = NULL;
uint16_t ident;
while (loc.extLength > 0 &&
- (bh = udf_read_tagged(sb, loc.extLocation,
- loc.extLocation, &ident)) &&
- ident == TAG_IDENT_LVID)
- {
+ (bh = udf_read_tagged(sb, loc.extLocation,
+ loc.extLocation, &ident)) &&
+ ident == TAG_IDENT_LVID) {
UDF_SB_LVIDBH(sb) = bh;
-
+
if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt));
-
+
if (UDF_SB_LVIDBH(sb) != bh)
brelse(bh);
loc.extLength -= sb->s_blocksize;
- loc.extLocation ++;
+ loc.extLocation++;
}
if (UDF_SB_LVIDBH(sb) != bh)
brelse(bh);
@@ -1158,15 +1099,15 @@ udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static int
-udf_process_sequence(struct super_block *sb, long block, long lastblock, kernel_lb_addr *fileset)
+static int udf_process_sequence(struct super_block *sb, long block, long lastblock,
+ kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
struct udf_vds_record vds[VDS_POS_LENGTH];
struct generic_desc *gd;
struct volDescPtr *vdp;
- int done=0;
- int i,j;
+ int done = 0;
+ int i, j;
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
@@ -1174,97 +1115,92 @@ udf_process_sequence(struct super_block *sb, long block, long lastblock, kernel_
memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
/* Read the main descriptor sequence */
- for (;(!done && block <= lastblock); block++)
- {
+ for (; (!done && block <= lastblock); block++) {
bh = udf_read_tagged(sb, block, block, &ident);
- if (!bh)
+ if (!bh)
break;
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
gd = (struct generic_desc *)bh->b_data;
vdsn = le32_to_cpu(gd->volDescSeqNum);
- switch (ident)
- {
- case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
- if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum)
- {
- vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn;
- vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
- }
- break;
- case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
- if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum)
- {
- vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
- vds[VDS_POS_VOL_DESC_PTR].block = block;
-
- vdp = (struct volDescPtr *)bh->b_data;
- next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
- next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength);
- next_e = next_e >> sb->s_blocksize_bits;
- next_e += next_s;
- }
- break;
- case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
- if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum)
- {
- vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn;
- vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
- }
- break;
- case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
- if (!vds[VDS_POS_PARTITION_DESC].block)
- vds[VDS_POS_PARTITION_DESC].block = block;
- break;
- case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
- if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum)
- {
- vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn;
- vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
- }
- break;
- case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
- if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum)
- {
- vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn;
- vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
- }
- break;
- case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
- vds[VDS_POS_TERMINATING_DESC].block = block;
- if (next_e)
- {
- block = next_s;
- lastblock = next_e;
- next_s = next_e = 0;
- }
- else
- done = 1;
- break;
+ switch (ident) {
+ case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
+ if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) {
+ vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn;
+ vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
+ }
+ break;
+ case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
+ if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) {
+ vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
+ vds[VDS_POS_VOL_DESC_PTR].block = block;
+
+ vdp = (struct volDescPtr *)bh->b_data;
+ next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
+ next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength);
+ next_e = next_e >> sb->s_blocksize_bits;
+ next_e += next_s;
+ }
+ break;
+ case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
+ if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) {
+ vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn;
+ vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
+ }
+ break;
+ case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
+ if (!vds[VDS_POS_PARTITION_DESC].block)
+ vds[VDS_POS_PARTITION_DESC].block = block;
+ break;
+ case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
+ if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) {
+ vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn;
+ vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
+ }
+ break;
+ case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
+ if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) {
+ vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn;
+ vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
+ }
+ break;
+ case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
+ vds[VDS_POS_TERMINATING_DESC].block = block;
+ if (next_e) {
+ block = next_s;
+ lastblock = next_e;
+ next_s = next_e = 0;
+ } else {
+ done = 1;
+ }
+ break;
}
brelse(bh);
}
- for (i=0; i<VDS_POS_LENGTH; i++)
- {
- if (vds[i].block)
- {
+ for (i = 0; i < VDS_POS_LENGTH; i++) {
+ if (vds[i].block) {
bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident);
- if (i == VDS_POS_PRIMARY_VOL_DESC)
+ if (i == VDS_POS_PRIMARY_VOL_DESC) {
udf_load_pvoldesc(sb, bh);
- else if (i == VDS_POS_LOGICAL_VOL_DESC)
+ } else if (i == VDS_POS_LOGICAL_VOL_DESC) {
udf_load_logicalvol(sb, bh, fileset);
- else if (i == VDS_POS_PARTITION_DESC)
- {
+ } else if (i == VDS_POS_PARTITION_DESC) {
struct buffer_head *bh2 = NULL;
- udf_load_partdesc(sb, bh);
- for (j=vds[i].block+1; j<vds[VDS_POS_TERMINATING_DESC].block; j++)
- {
+ if (udf_load_partdesc(sb, bh)) {
+ brelse(bh);
+ return 1;
+ }
+ for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) {
bh2 = udf_read_tagged(sb, j, j, &ident);
gd = (struct generic_desc *)bh2->b_data;
if (ident == TAG_IDENT_PD)
- udf_load_partdesc(sb, bh2);
+ if (udf_load_partdesc(sb, bh2)) {
+ brelse(bh);
+ brelse(bh2);
+ return 1;
+ }
brelse(bh2);
}
}
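udf_process_sequence() above scans the volume descriptor sequence and, for each descriptor type, remembers only the block carrying the highest volDescSeqNum. A standalone sketch of that "latest version wins" bookkeeping, using made-up data:

#include <stdint.h>
#include <stdio.h>

enum { POS_PVD, POS_LVD, POS_LEN };

struct vds_record {
	uint32_t seq;
	long block;
};

int main(void)
{
	/* (type, seq) pairs as they might be met while walking the blocks. */
	struct { int type; uint32_t seq; } found[] = {
		{ POS_PVD, 1 }, { POS_LVD, 2 }, { POS_PVD, 3 }, { POS_LVD, 1 },
	};
	struct vds_record vds[POS_LEN] = { { 0, 0 }, { 0, 0 } };
	long block = 257;                      /* pretend start block */
	size_t i;

	for (i = 0; i < sizeof(found) / sizeof(found[0]); i++, block++) {
		struct vds_record *r = &vds[found[i].type];

		if (found[i].seq >= r->seq) {  /* newer (or equal) wins */
			r->seq = found[i].seq;
			r->block = block;
		}
	}
	printf("PVD at %ld, LVD at %ld\n",
	       vds[POS_PVD].block, vds[POS_LVD].block);
	return 0;
}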
@@ -1278,31 +1214,28 @@ udf_process_sequence(struct super_block *sb, long block, long lastblock, kernel_
/*
* udf_check_valid()
*/
-static int
-udf_check_valid(struct super_block *sb, int novrs, int silent)
+static int udf_check_valid(struct super_block *sb, int novrs, int silent)
{
long block;
- if (novrs)
- {
+ if (novrs) {
udf_debug("Validity check skipped because of novrs option\n");
return 0;
}
/* Check that it is NSR02 compliant */
/* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
- else if ((block = udf_vrs(sb, silent)) == -1)
- {
- udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+ else if ((block = udf_vrs(sb, silent)) == -1) {
+ udf_debug("Failed to read byte 32768. Assuming open disc. "
+ "Skipping validity check\n");
if (!UDF_SB_LASTBLOCK(sb))
UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
return 0;
- }
- else
+ } else {
return !block;
+ }
}
-static int
-udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
+static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
uint16_t ident;
@@ -1314,14 +1247,14 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
return 1;
for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
- if (UDF_SB_ANCHOR(sb)[i] && (bh = udf_read_tagged(sb,
- UDF_SB_ANCHOR(sb)[i], UDF_SB_ANCHOR(sb)[i], &ident)))
- {
+ if (UDF_SB_ANCHOR(sb)[i] &&
+ (bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i],
+ UDF_SB_ANCHOR(sb)[i], &ident))) {
anchor = (struct anchorVolDescPtr *)bh->b_data;
/* Locate the main sequence */
- main_s = le32_to_cpu( anchor->mainVolDescSeqExt.extLocation );
- main_e = le32_to_cpu( anchor->mainVolDescSeqExt.extLength );
+ main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
+ main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
main_e = main_e >> sb->s_blocksize_bits;
main_e += main_s;
@@ -1336,8 +1269,7 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
/* Process the main & reserve sequences */
/* responsible for finding the PartitionDesc(s) */
if (!(udf_process_sequence(sb, main_s, main_e, fileset) &&
- udf_process_sequence(sb, reserve_s, reserve_e, fileset)))
- {
+ udf_process_sequence(sb, reserve_s, reserve_e, fileset))) {
break;
}
}
@@ -1349,70 +1281,68 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
} else
udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]);
- for (i=0; i<UDF_SB_NUMPARTS(sb); i++)
- {
- switch (UDF_SB_PARTTYPE(sb, i))
- {
- case UDF_VIRTUAL_MAP15:
- case UDF_VIRTUAL_MAP20:
- {
- kernel_lb_addr uninitialized_var(ino);
-
- if (!UDF_SB_LASTBLOCK(sb))
- {
- UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
- udf_find_anchor(sb);
- }
+ for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
+ kernel_lb_addr uninitialized_var(ino);
+ switch (UDF_SB_PARTTYPE(sb, i)) {
+ case UDF_VIRTUAL_MAP15:
+ case UDF_VIRTUAL_MAP20:
+ if (!UDF_SB_LASTBLOCK(sb)) {
+ UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
+ udf_find_anchor(sb);
+ }
- if (!UDF_SB_LASTBLOCK(sb))
- {
- udf_debug("Unable to determine Lastblock (For Virtual Partition)\n");
- return 1;
- }
+ if (!UDF_SB_LASTBLOCK(sb)) {
+ udf_debug("Unable to determine Lastblock (For "
+ "Virtual Partition)\n");
+ return 1;
+ }
- for (j=0; j<UDF_SB_NUMPARTS(sb); j++)
- {
- if (j != i &&
- UDF_SB_PARTVSN(sb,i) == UDF_SB_PARTVSN(sb,j) &&
- UDF_SB_PARTNUM(sb,i) == UDF_SB_PARTNUM(sb,j))
- {
- ino.partitionReferenceNum = j;
- ino.logicalBlockNum = UDF_SB_LASTBLOCK(sb) -
- UDF_SB_PARTROOT(sb,j);
- break;
- }
+ for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) {
+ if (j != i && UDF_SB_PARTVSN(sb, i) ==
+ UDF_SB_PARTVSN(sb, j) &&
+ UDF_SB_PARTNUM(sb, i) ==
+ UDF_SB_PARTNUM(sb, j)) {
+ ino.partitionReferenceNum = j;
+ ino.logicalBlockNum =
+ UDF_SB_LASTBLOCK(sb) -
+ UDF_SB_PARTROOT(sb, j);
+ break;
}
+ }
- if (j == UDF_SB_NUMPARTS(sb))
- return 1;
+ if (j == UDF_SB_NUMPARTS(sb))
+ return 1;
- if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino)))
- return 1;
+ if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino)))
+ return 1;
- if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP15)
- {
- UDF_SB_TYPEVIRT(sb,i).s_start_offset = udf_ext0_offset(UDF_SB_VAT(sb));
- UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size - 36) >> 2;
- }
- else if (UDF_SB_PARTTYPE(sb,i) == UDF_VIRTUAL_MAP20)
- {
- struct buffer_head *bh = NULL;
- uint32_t pos;
+ if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) {
+ UDF_SB_TYPEVIRT(sb, i).s_start_offset =
+ udf_ext0_offset(UDF_SB_VAT(sb));
+ UDF_SB_TYPEVIRT(sb, i).s_num_entries =
+ (UDF_SB_VAT(sb)->i_size - 36) >> 2;
+ } else if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP20) {
+ struct buffer_head *bh = NULL;
+ uint32_t pos;
- pos = udf_block_map(UDF_SB_VAT(sb), 0);
- bh = sb_bread(sb, pos);
- if (!bh)
- return 1;
- UDF_SB_TYPEVIRT(sb,i).s_start_offset =
- le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
- udf_ext0_offset(UDF_SB_VAT(sb));
- UDF_SB_TYPEVIRT(sb,i).s_num_entries = (UDF_SB_VAT(sb)->i_size -
- UDF_SB_TYPEVIRT(sb,i).s_start_offset) >> 2;
- brelse(bh);
- }
- UDF_SB_PARTROOT(sb,i) = udf_get_pblock(sb, 0, i, 0);
- UDF_SB_PARTLEN(sb,i) = UDF_SB_PARTLEN(sb,ino.partitionReferenceNum);
+ pos = udf_block_map(UDF_SB_VAT(sb), 0);
+ bh = sb_bread(sb, pos);
+ if (!bh)
+ return 1;
+ UDF_SB_TYPEVIRT(sb, i).s_start_offset =
+ le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data +
+ udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
+ udf_ext0_offset(UDF_SB_VAT(sb));
+ UDF_SB_TYPEVIRT(sb, i).s_num_entries =
+ (UDF_SB_VAT(sb)->i_size -
+ UDF_SB_TYPEVIRT(sb, i).s_start_offset) >> 2;
+ brelse(bh);
}
+ UDF_SB_PARTROOT(sb, i) = udf_get_pblock(sb, 0, i, 0);
+ UDF_SB_PARTLEN(sb, i) = UDF_SB_PARTLEN(sb,
+ ino.partitionReferenceNum);
}
}
return 0;
@@ -1420,26 +1350,28 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
static void udf_open_lvid(struct super_block *sb)
{
- if (UDF_SB_LVIDBH(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
int i;
kernel_timestamp cpu_time;
UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
- UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
+ UDF_SB_LVID(sb)->recordingDateAndTime =
+ cpu_to_lets(cpu_time);
UDF_SB_LVID(sb)->integrityType = LVID_INTEGRITY_TYPE_OPEN;
UDF_SB_LVID(sb)->descTag.descCRC =
- cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
- le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+ cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
+ le16_to_cpu(UDF_SB_LVID(sb)->descTag.
+ descCRCLength), 0));
UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
if (i != 4)
UDF_SB_LVID(sb)->descTag.tagChecksum +=
- ((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
+ ((uint8_t *) &
+ (UDF_SB_LVID(sb)->descTag))[i];
mark_buffer_dirty(UDF_SB_LVIDBH(sb));
}
@@ -1447,12 +1379,11 @@ static void udf_open_lvid(struct super_block *sb)
static void udf_close_lvid(struct super_block *sb)
{
- if (UDF_SB_LVIDBH(sb) &&
- UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN)
- {
- int i;
- kernel_timestamp cpu_time;
+ kernel_timestamp cpu_time;
+ int i;
+ if (UDF_SB_LVIDBH(sb) &&
+ UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
@@ -1467,10 +1398,10 @@ static void udf_close_lvid(struct super_block *sb)
UDF_SB_LVID(sb)->descTag.descCRC =
cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
- le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
+ le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
- for (i=0; i<16; i++)
+ for (i = 0; i < 16; i++)
if (i != 4)
UDF_SB_LVID(sb)->descTag.tagChecksum +=
((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
@@ -1498,7 +1429,7 @@ static void udf_close_lvid(struct super_block *sb)
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
int i;
- struct inode *inode=NULL;
+ struct inode *inode = NULL;
struct udf_options uopt;
kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
@@ -1511,6 +1442,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
+
sb->s_fs_info = sbi;
memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
@@ -1520,15 +1452,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
goto error_out;
if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
- uopt.flags & (1 << UDF_FLAG_NLS_MAP))
- {
+ uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
udf_error(sb, "udf_read_super",
- "utf8 cannot be combined with iocharset\n");
+ "utf8 cannot be combined with iocharset\n");
goto error_out;
}
#ifdef CONFIG_UDF_NLS
- if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map)
- {
+ if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
uopt.nls_map = load_nls_default();
if (!uopt.nls_map)
uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
@@ -1552,7 +1482,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!udf_set_blocksize(sb, uopt.blocksize))
goto error_out;
- if ( uopt.session == 0xFFFFFFFF )
+ if (uopt.session == 0xFFFFFFFF)
UDF_SB_SESSION(sb) = udf_get_last_session(sb);
else
UDF_SB_SESSION(sb) = uopt.session;
@@ -1564,10 +1494,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
UDF_SB_ANCHOR(sb)[2] = uopt.anchor;
UDF_SB_ANCHOR(sb)[3] = 256;
- if (udf_check_valid(sb, uopt.novrs, silent)) /* read volume recognition sequences */
- {
+ if (udf_check_valid(sb, uopt.novrs, silent)) { /* read volume recognition sequences */
printk("UDF-fs: No VRS found\n");
- goto error_out;
+ goto error_out;
}
udf_find_anchor(sb);
@@ -1579,29 +1508,24 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_magic = UDF_SUPER_MAGIC;
sb->s_time_gran = 1000;
- if (udf_load_partition(sb, &fileset))
- {
+ if (udf_load_partition(sb, &fileset)) {
printk("UDF-fs: No partition found (1)\n");
goto error_out;
}
udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb));
- if ( UDF_SB_LVIDBH(sb) )
- {
+ if (UDF_SB_LVIDBH(sb)) {
uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev);
uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
/* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */
- if (minUDFReadRev > UDF_MAX_READ_VERSION)
- {
+ if (minUDFReadRev > UDF_MAX_READ_VERSION) {
printk("UDF-fs: minUDFReadRev=%x (max is %x)\n",
- le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev),
- UDF_MAX_READ_VERSION);
+ le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev),
+ UDF_MAX_READ_VERSION);
goto error_out;
- }
- else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
- {
+ } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
sb->s_flags |= MS_RDONLY;
}
@@ -1613,8 +1537,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
}
- if ( !UDF_SB_NUMPARTS(sb) )
- {
+ if (!UDF_SB_NUMPARTS(sb)) {
printk("UDF-fs: No partition found (2)\n");
goto error_out;
}
@@ -1624,20 +1547,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_flags |= MS_RDONLY;
}
- if ( udf_find_fileset(sb, &fileset, &rootdir) )
- {
+ if (udf_find_fileset(sb, &fileset, &rootdir)) {
printk("UDF-fs: No fileset found\n");
goto error_out;
}
- if (!silent)
- {
+ if (!silent) {
kernel_timestamp ts;
udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb));
- udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
- UDFFS_VERSION, UDFFS_DATE,
- UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
- ts.typeAndTimezone);
+ udf_info("UDF %s (%s) Mounting volume '%s', "
+ "timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
+ UDFFS_VERSION, UDFFS_DATE,
+ UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
+ ts.typeAndTimezone);
}
if (!(sb->s_flags & MS_RDONLY))
udf_open_lvid(sb);
@@ -1645,18 +1567,16 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
/* Assign the root inode */
/* assign inodes by physical block number */
/* perhaps it's not extensible enough, but for now ... */
- inode = udf_iget(sb, rootdir);
- if (!inode)
- {
+ inode = udf_iget(sb, rootdir);
+ if (!inode) {
printk("UDF-fs: Error in udf_iget, block=%d, partition=%d\n",
- rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+ rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
goto error_out;
}
/* Allocate a dentry for the root inode */
sb->s_root = d_alloc_root(inode);
- if (!sb->s_root)
- {
+ if (!sb->s_root) {
printk("UDF-fs: Couldn't allocate root dentry\n");
iput(inode);
goto error_out;
@@ -1667,19 +1587,17 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
error_out:
if (UDF_SB_VAT(sb))
iput(UDF_SB_VAT(sb));
- if (UDF_SB_NUMPARTS(sb))
- {
+ if (UDF_SB_NUMPARTS(sb)) {
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
- UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
+ UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
- UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
- if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
- {
- for (i=0; i<4; i++)
+ UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
+ if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) {
+ for (i = 0; i < 4; i++)
brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
}
}
@@ -1693,16 +1611,16 @@ error_out:
UDF_SB_FREE(sb);
kfree(sbi);
sb->s_fs_info = NULL;
+
return -EINVAL;
}
void udf_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
+ const char *fmt, ...)
{
va_list args;
- if (!(sb->s_flags & MS_RDONLY))
- {
+ if (!(sb->s_flags & MS_RDONLY)) {
/* mark sb error */
sb->s_dirt = 1;
}
@@ -1714,15 +1632,15 @@ void udf_error(struct super_block *sb, const char *function,
}
void udf_warning(struct super_block *sb, const char *function,
- const char *fmt, ...)
+ const char *fmt, ...)
{
va_list args;
- va_start (args, fmt);
+ va_start(args, fmt);
vsnprintf(error_buf, sizeof(error_buf), fmt, args);
va_end(args);
printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
- sb->s_id, function, error_buf);
+ sb->s_id, function, error_buf);
}
/*
@@ -1738,26 +1656,23 @@ void udf_warning(struct super_block *sb, const char *function,
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static void
-udf_put_super(struct super_block *sb)
+static void udf_put_super(struct super_block *sb)
{
int i;
if (UDF_SB_VAT(sb))
iput(UDF_SB_VAT(sb));
- if (UDF_SB_NUMPARTS(sb))
- {
+ if (UDF_SB_NUMPARTS(sb)) {
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
- UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_uspace);
+ UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
- UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb),s_fspace);
- if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15)
- {
- for (i=0; i<4; i++)
+ UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
+ if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) {
+ for (i = 0; i < 4; i++)
brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
}
}
@@ -1786,8 +1701,7 @@ udf_put_super(struct super_block *sb)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static int
-udf_statfs(struct dentry *dentry, struct kstatfs *buf)
+static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -1797,11 +1711,11 @@ udf_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree = udf_count_free(sb);
buf->f_bavail = buf->f_bfree;
buf->f_files = (UDF_SB_LVIDBH(sb) ?
- (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
- le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree;
+ (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
+ le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree;
buf->f_ffree = buf->f_bfree;
/* __kernel_fsid_t f_fsid */
- buf->f_namelen = UDF_NAME_LEN-2;
+ buf->f_namelen = UDF_NAME_LEN - 2;
return 0;
}
@@ -1810,8 +1724,7 @@ static unsigned char udf_bitmap_lookup[16] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};
-static unsigned int
-udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
+static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
{
struct buffer_head *bh = NULL;
unsigned int accum = 0;
@@ -1830,13 +1743,10 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
bh = udf_read_ptagged(sb, loc, 0, &ident);
- if (!bh)
- {
+ if (!bh) {
printk(KERN_ERR "udf: udf_count_free failed\n");
goto out;
- }
- else if (ident != TAG_IDENT_SBD)
- {
+ } else if (ident != TAG_IDENT_SBD) {
brelse(bh);
printk(KERN_ERR "udf: udf_count_free failed\n");
goto out;
@@ -1847,23 +1757,19 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
ptr = (uint8_t *)bh->b_data;
- while ( bytes > 0 )
- {
- while ((bytes > 0) && (index < sb->s_blocksize))
- {
+ while (bytes > 0) {
+ while ((bytes > 0) && (index < sb->s_blocksize)) {
value = ptr[index];
- accum += udf_bitmap_lookup[ value & 0x0f ];
- accum += udf_bitmap_lookup[ value >> 4 ];
+ accum += udf_bitmap_lookup[value & 0x0f];
+ accum += udf_bitmap_lookup[value >> 4];
index++;
bytes--;
}
- if ( bytes )
- {
+ if (bytes) {
brelse(bh);
newblock = udf_get_lb_pblock(sb, loc, ++block);
bh = udf_tread(sb, newblock);
- if (!bh)
- {
+ if (!bh) {
udf_debug("read failed\n");
goto out;
}
@@ -1879,8 +1785,7 @@ out:
return accum;
}
-static unsigned int
-udf_count_free_table(struct super_block *sb, struct inode * table)
+static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table)
{
unsigned int accum = 0;
uint32_t elen;
@@ -1894,26 +1799,23 @@ udf_count_free_table(struct super_block *sb, struct inode * table)
epos.offset = sizeof(struct unallocSpaceEntry);
epos.bh = NULL;
- while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
+ while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
accum += (elen >> table->i_sb->s_blocksize_bits);
+ }
brelse(epos.bh);
unlock_kernel();
return accum;
}
-
-static unsigned int
-udf_count_free(struct super_block *sb)
+
+static unsigned int udf_count_free(struct super_block *sb)
{
unsigned int accum = 0;
- if (UDF_SB_LVIDBH(sb))
- {
- if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb))
- {
+ if (UDF_SB_LVIDBH(sb)) {
+ if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb)) {
accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]);
-
if (accum == 0xFFFFFFFF)
accum = 0;
}
@@ -1922,28 +1824,24 @@ udf_count_free(struct super_block *sb)
if (accum)
return accum;
- if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
- {
+ if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) {
accum += udf_count_free_bitmap(sb,
- UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap);
+ UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap);
}
- if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
- {
+ if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) {
accum += udf_count_free_bitmap(sb,
- UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap);
+ UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap);
}
if (accum)
return accum;
- if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
- {
+ if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) {
accum += udf_count_free_table(sb,
- UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
+ UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
}
- if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
- {
+ if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) {
accum += udf_count_free_table(sb,
- UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
+ UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
}
return accum;
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 12613b6..e6f933d 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -11,7 +11,7 @@
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2001 Ben Fennema
- * (C) 1999 Stelias Computing Inc
+ * (C) 1999 Stelias Computing Inc
*
* HISTORY
*
@@ -39,35 +39,33 @@ static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char
int elen = 0;
char *p = to;
- while (elen < fromlen)
- {
+ while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
- switch (pc->componentType)
- {
- case 1:
- if (pc->lengthComponentIdent == 0)
- {
- p = to;
- *p++ = '/';
- }
- break;
- case 3:
- memcpy(p, "../", 3);
- p += 3;
- break;
- case 4:
- memcpy(p, "./", 2);
- p += 2;
- /* that would be . - just ignore */
- break;
- case 5:
- p += udf_get_filename(sb, pc->componentIdent, p, pc->lengthComponentIdent);
+ switch (pc->componentType) {
+ case 1:
+ if (pc->lengthComponentIdent == 0) {
+ p = to;
*p++ = '/';
- break;
+ }
+ break;
+ case 3:
+ memcpy(p, "../", 3);
+ p += 3;
+ break;
+ case 4:
+ memcpy(p, "./", 2);
+ p += 2;
+ /* that would be . - just ignore */
+ break;
+ case 5:
+ p += udf_get_filename(sb, pc->componentIdent, p,
+ pc->lengthComponentIdent);
+ *p++ = '/';
+ break;
}
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
- if (p > to+1)
+ if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
@@ -82,10 +80,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
char *p = kmap(page);
lock_kernel();
- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
- else
- {
+ } else {
bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
if (!bh)
@@ -102,6 +99,7 @@ static int udf_symlink_filler(struct file *file, struct page *page)
kunmap(page);
unlock_page(page);
return 0;
+
out:
unlock_kernel();
SetPageError(page);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 60d2776..7fc3912 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -28,35 +28,36 @@
#include "udf_i.h"
#include "udf_sb.h"
-static void extent_trunc(struct inode * inode, struct extent_position *epos,
- kernel_lb_addr eloc, int8_t etype, uint32_t elen, uint32_t nelen)
+static void extent_trunc(struct inode *inode, struct extent_position *epos,
+ kernel_lb_addr eloc, int8_t etype, uint32_t elen,
+ uint32_t nelen)
{
- kernel_lb_addr neloc = { 0, 0 };
- int last_block = (elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
- int first_block = (nelen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
+ kernel_lb_addr neloc = {};
+ int last_block = (elen + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+ int first_block = (nelen + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
- if (nelen)
- {
- if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
- {
- udf_free_blocks(inode->i_sb, inode, eloc, 0, last_block);
+ if (nelen) {
+ if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+ udf_free_blocks(inode->i_sb, inode, eloc, 0,
+ last_block);
etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
- }
- else
+ } else
neloc = eloc;
nelen = (etype << 30) | nelen;
}
- if (elen != nelen)
- {
+ if (elen != nelen) {
udf_write_aext(inode, epos, neloc, nelen, 0);
- if (last_block - first_block > 0)
- {
+ if (last_block - first_block > 0) {
if (etype == (EXT_RECORDED_ALLOCATED >> 30))
mark_inode_dirty(inode);
if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
- udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
+ udf_free_blocks(inode->i_sb, inode, eloc,
+ first_block,
+ last_block - first_block);
}
}
}
@@ -67,7 +68,7 @@ static void extent_trunc(struct inode * inode, struct extent_position *epos,
*/
void udf_truncate_tail_extent(struct inode *inode)
{
- struct extent_position epos = { NULL, 0, {0, 0}};
+ struct extent_position epos = {};
kernel_lb_addr eloc;
uint32_t elen, nelen;
uint64_t lbcount = 0;
@@ -89,8 +90,7 @@ void udf_truncate_tail_extent(struct inode *inode)
BUG();
/* Find the last extent in the file */
- while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
- {
+ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
etype = netype;
lbcount += elen;
if (lbcount > inode->i_size) {
@@ -123,7 +123,7 @@ void udf_truncate_tail_extent(struct inode *inode)
void udf_discard_prealloc(struct inode *inode)
{
- struct extent_position epos = { NULL, 0, {0, 0}};
+ struct extent_position epos = { NULL, 0, {0, 0} };
kernel_lb_addr eloc;
uint32_t elen;
uint64_t lbcount = 0;
@@ -131,7 +131,7 @@ void udf_discard_prealloc(struct inode *inode)
int adsize;
if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ||
- inode->i_size == UDF_I_LENEXTENTS(inode))
+ inode->i_size == UDF_I_LENEXTENTS(inode))
return;
if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
@@ -153,15 +153,21 @@ void udf_discard_prealloc(struct inode *inode)
lbcount -= elen;
extent_trunc(inode, &epos, eloc, etype, elen, 0);
if (!epos.bh) {
- UDF_I_LENALLOC(inode) = epos.offset - udf_file_entry_alloc_offset(inode);
+ UDF_I_LENALLOC(inode) =
+ epos.offset - udf_file_entry_alloc_offset(inode);
mark_inode_dirty(inode);
} else {
- struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
- aed->lengthAllocDescs = cpu_to_le32(epos.offset - sizeof(struct allocExtDesc));
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+ struct allocExtDesc *aed =
+ (struct allocExtDesc *)(epos.bh->b_data);
+ aed->lengthAllocDescs =
+ cpu_to_le32(epos.offset -
+ sizeof(struct allocExtDesc));
+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
udf_update_tag(epos.bh->b_data, epos.offset);
else
- udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
+ udf_update_tag(epos.bh->b_data,
+ sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos.bh, inode);
}
}
@@ -171,10 +177,10 @@ void udf_discard_prealloc(struct inode *inode)
brelse(epos.bh);
}
-void udf_truncate_extents(struct inode * inode)
+void udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
- kernel_lb_addr eloc, neloc = { 0, 0 };
+ kernel_lb_addr eloc, neloc = {};
uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
int8_t etype;
struct super_block *sb = inode->i_sb;
@@ -190,9 +196,9 @@ void udf_truncate_extents(struct inode * inode)
BUG();
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
- byte_offset = (offset << sb->s_blocksize_bits) + (inode->i_size & (sb->s_blocksize-1));
- if (etype != -1)
- {
+ byte_offset = (offset << sb->s_blocksize_bits) +
+ (inode->i_size & (sb->s_blocksize - 1));
+ if (etype != -1) {
epos.offset -= adsize;
extent_trunc(inode, &epos, eloc, etype, elen, byte_offset);
epos.offset += adsize;
@@ -206,35 +212,33 @@ void udf_truncate_extents(struct inode * inode)
else
lenalloc -= sizeof(struct allocExtDesc);
- while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1)
- {
- if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
- {
+ while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
+ if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
udf_write_aext(inode, &epos, neloc, nelen, 0);
- if (indirect_ext_len)
- {
+ if (indirect_ext_len) {
/* We managed to free all extents in the
* indirect extent - free it too */
if (!epos.bh)
BUG();
- udf_free_blocks(sb, inode, epos.block, 0, indirect_ext_len);
- }
- else
- {
- if (!epos.bh)
- {
+ udf_free_blocks(sb, inode, epos.block,
+ 0, indirect_ext_len);
+ } else {
+ if (!epos.bh) {
UDF_I_LENALLOC(inode) = lenalloc;
mark_inode_dirty(inode);
- }
- else
- {
- struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
- aed->lengthAllocDescs = cpu_to_le32(lenalloc);
- if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(sb) >= 0x0201)
- udf_update_tag(epos.bh->b_data, lenalloc +
- sizeof(struct allocExtDesc));
+ } else {
+ struct allocExtDesc *aed =
+ (struct allocExtDesc *)(epos.bh->b_data);
+ aed->lengthAllocDescs =
+ cpu_to_le32(lenalloc);
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(sb) >= 0x0201)
+ udf_update_tag(epos.bh->b_data,
+ lenalloc +
+ sizeof(struct allocExtDesc));
else
- udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
+ udf_update_tag(epos.bh->b_data,
+ sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos.bh, inode);
}
}
@@ -243,49 +247,41 @@ void udf_truncate_extents(struct inode * inode)
epos.block = eloc;
epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, eloc, 0));
if (elen)
- indirect_ext_len = (elen +
- sb->s_blocksize - 1) >>
+ indirect_ext_len = (elen + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
else
indirect_ext_len = 1;
- }
- else
- {
+ } else {
extent_trunc(inode, &epos, eloc, etype, elen, 0);
epos.offset += adsize;
}
}
- if (indirect_ext_len)
- {
+ if (indirect_ext_len) {
if (!epos.bh)
BUG();
- udf_free_blocks(sb, inode, epos.block, 0, indirect_ext_len);
- }
- else
- {
- if (!epos.bh)
- {
+ udf_free_blocks(sb, inode, epos.block, 0,
+ indirect_ext_len);
+ } else {
+ if (!epos.bh) {
UDF_I_LENALLOC(inode) = lenalloc;
mark_inode_dirty(inode);
- }
- else
- {
- struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
+ } else {
+ struct allocExtDesc *aed =
+ (struct allocExtDesc *)(epos.bh->b_data);
aed->lengthAllocDescs = cpu_to_le32(lenalloc);
- if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(sb) >= 0x0201)
- udf_update_tag(epos.bh->b_data, lenalloc +
- sizeof(struct allocExtDesc));
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
+ UDF_SB_UDFREV(sb) >= 0x0201)
+ udf_update_tag(epos.bh->b_data,
+ lenalloc + sizeof(struct allocExtDesc));
else
- udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc));
+ udf_update_tag(epos.bh->b_data,
+ sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos.bh, inode);
}
}
- }
- else if (inode->i_size)
- {
- if (byte_offset)
- {
+ } else if (inode->i_size) {
+ if (byte_offset) {
kernel_long_ad extent;
/*
@@ -293,21 +289,23 @@ void udf_truncate_extents(struct inode * inode)
* no extent above inode->i_size => truncate is
* extending the file by 'offset' blocks.
*/
- if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ if ((!epos.bh &&
+ epos.offset == udf_file_entry_alloc_offset(inode)) ||
(epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
/* File has no extents at all or has empty last
* indirect extent! Create a fake extent... */
extent.extLocation.logicalBlockNum = 0;
extent.extLocation.partitionReferenceNum = 0;
extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
- }
- else {
+ } else {
epos.offset -= adsize;
etype = udf_next_aext(inode, &epos,
- &extent.extLocation, &extent.extLength, 0);
+ &extent.extLocation,
+ &extent.extLength, 0);
extent.extLength |= etype << 30;
}
- udf_extend_file(inode, &epos, &extent, offset+((inode->i_size & (sb->s_blocksize-1)) != 0));
+ udf_extend_file(inode, &epos, &extent,
+ offset + ((inode->i_size & (sb->s_blocksize - 1)) != 0));
}
}
UDF_I_LENEXTENTS(inode) = inode->i_size;
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 3b2e6c8..3c29820 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -24,6 +24,8 @@
#define UDF_FLAG_UID_IGNORE 12 /* use sb uid instead of on disk uid */
#define UDF_FLAG_GID_FORGET 13
#define UDF_FLAG_GID_IGNORE 14
+#define UDF_FLAG_UID_SET 15
+#define UDF_FLAG_GID_SET 16
#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
@@ -41,8 +43,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
#define UDF_SB_FREE(X)\
{\
- if (UDF_SB(X))\
- {\
+ if (UDF_SB(X)) {\
kfree(UDF_SB_PARTMAPS(X));\
UDF_SB_PARTMAPS(X) = NULL;\
}\
@@ -51,13 +52,10 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
#define UDF_SB_ALLOC_PARTMAPS(X,Y)\
{\
UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
- if (UDF_SB_PARTMAPS(X) != NULL)\
- {\
+ if (UDF_SB_PARTMAPS(X) != NULL) {\
UDF_SB_NUMPARTS(X) = Y;\
memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\
- }\
- else\
- {\
+ } else {\
UDF_SB_NUMPARTS(X) = 0;\
udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\
}\
@@ -72,15 +70,12 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
else\
UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\
- if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL)\
- {\
+ if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL) {\
memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\
UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\
(struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\
UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
- }\
- else\
- {\
+ } else {\
udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\
}\
}
@@ -90,8 +85,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
int i;\
int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\
int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
- for (i=0; i<nr_groups; i++)\
- {\
+ for (i = 0; i < nr_groups; i++) {\
if (UDF_SB_BITMAP(X,Y,Z,i))\
brelse(UDF_SB_BITMAP(X,Y,Z,i));\
}\
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index f581f2f..c8016cc 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -50,30 +50,26 @@ extern const struct address_space_operations udf_aops;
extern const struct address_space_operations udf_adinicb_aops;
extern const struct address_space_operations udf_symlink_aops;
-struct udf_fileident_bh
-{
+struct udf_fileident_bh {
struct buffer_head *sbh;
struct buffer_head *ebh;
int soffset;
int eoffset;
};
-struct udf_vds_record
-{
+struct udf_vds_record {
uint32_t block;
uint32_t volDescSeqNum;
};
-struct generic_desc
-{
+struct generic_desc {
tag descTag;
__le32 volDescSeqNum;
};
-struct ustr
-{
+struct ustr {
uint8_t u_cmpID;
- uint8_t u_name[UDF_NAME_LEN-2];
+ uint8_t u_name[UDF_NAME_LEN - 2];
uint8_t u_len;
};
@@ -83,44 +79,58 @@ struct extent_position {
kernel_lb_addr block;
};
-
/* super.c */
extern void udf_error(struct super_block *, const char *, const char *, ...);
extern void udf_warning(struct super_block *, const char *, const char *, ...);
/* namei.c */
-extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, struct fileIdentDesc *, struct udf_fileident_bh *, uint8_t *, uint8_t *);
+extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
+ struct fileIdentDesc *, struct udf_fileident_bh *,
+ uint8_t *, uint8_t *);
/* file.c */
-extern int udf_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+extern int udf_ioctl(struct inode *, struct file *, unsigned int,
+ unsigned long);
/* inode.c */
extern struct inode *udf_iget(struct super_block *, kernel_lb_addr);
extern int udf_sync_inode(struct inode *);
extern void udf_expand_file_adinicb(struct inode *, int, int *);
-extern struct buffer_head * udf_expand_dir_adinicb(struct inode *, int *, int *);
-extern struct buffer_head * udf_bread(struct inode *, int, int, int *);
+extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
+extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
extern void udf_truncate(struct inode *);
extern void udf_read_inode(struct inode *);
extern void udf_delete_inode(struct inode *);
extern void udf_clear_inode(struct inode *);
extern int udf_write_inode(struct inode *, int);
extern long udf_block_map(struct inode *, sector_t);
-extern int udf_extend_file(struct inode *, struct extent_position *, kernel_long_ad *, sector_t);
-extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *, kernel_lb_addr *, uint32_t *, sector_t *);
-extern int8_t udf_add_aext(struct inode *, struct extent_position *, kernel_lb_addr, uint32_t, int);
-extern int8_t udf_write_aext(struct inode *, struct extent_position *, kernel_lb_addr, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position, kernel_lb_addr, uint32_t);
-extern int8_t udf_next_aext(struct inode *, struct extent_position *, kernel_lb_addr *, uint32_t *, int);
-extern int8_t udf_current_aext(struct inode *, struct extent_position *, kernel_lb_addr *, uint32_t *, int);
+extern int udf_extend_file(struct inode *, struct extent_position *,
+ kernel_long_ad *, sector_t);
+extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
+ kernel_lb_addr *, uint32_t *, sector_t *);
+extern int8_t udf_add_aext(struct inode *, struct extent_position *,
+ kernel_lb_addr, uint32_t, int);
+extern int8_t udf_write_aext(struct inode *, struct extent_position *,
+ kernel_lb_addr, uint32_t, int);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position,
+ kernel_lb_addr, uint32_t);
+extern int8_t udf_next_aext(struct inode *, struct extent_position *,
+ kernel_lb_addr *, uint32_t *, int);
+extern int8_t udf_current_aext(struct inode *, struct extent_position *,
+ kernel_lb_addr *, uint32_t *, int);
/* misc.c */
extern struct buffer_head *udf_tgetblk(struct super_block *, int);
extern struct buffer_head *udf_tread(struct super_block *, int);
-extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t, uint32_t, uint8_t);
-extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t, uint8_t);
-extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t, uint32_t, uint16_t *);
-extern struct buffer_head *udf_read_ptagged(struct super_block *, kernel_lb_addr, uint32_t, uint16_t *);
+extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t,
+ uint32_t, uint8_t);
+extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t,
+ uint8_t);
+extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t,
+ uint32_t, uint16_t *);
+extern struct buffer_head *udf_read_ptagged(struct super_block *,
+ kernel_lb_addr, uint32_t,
+ uint16_t *);
extern void udf_update_tag(char *, int);
extern void udf_new_tag(char *, uint16_t, uint16_t, uint16_t, uint32_t, int);
@@ -129,21 +139,26 @@ extern unsigned int udf_get_last_session(struct super_block *);
extern unsigned long udf_get_last_block(struct super_block *);
/* partition.c */
-extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t, uint32_t);
-extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t, uint32_t);
+extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t,
+ uint32_t);
+extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t,
+ uint32_t);
+extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t,
+ uint32_t);
+extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t,
+ uint32_t);
extern int udf_relocate_blocks(struct super_block *, long, long *);
/* unicode.c */
extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
-extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, int);
+extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
+ int);
extern int udf_build_ustr(struct ustr *, dstring *, int);
extern int udf_CS0toUTF8(struct ustr *, struct ustr *);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
-extern struct inode * udf_new_inode (struct inode *, int, int *);
+extern struct inode *udf_new_inode(struct inode *, int, int *);
/* truncate.c */
extern void udf_truncate_tail_extent(struct inode *);
@@ -151,18 +166,27 @@ extern void udf_discard_prealloc(struct inode *);
extern void udf_truncate_extents(struct inode *);
/* balloc.c */
-extern void udf_free_blocks(struct super_block *, struct inode *, kernel_lb_addr, uint32_t, uint32_t);
-extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t, uint32_t, uint32_t);
-extern int udf_new_block(struct super_block *, struct inode *, uint16_t, uint32_t, int *);
+extern void udf_free_blocks(struct super_block *, struct inode *,
+ kernel_lb_addr, uint32_t, uint32_t);
+extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t,
+ uint32_t, uint32_t);
+extern int udf_new_block(struct super_block *, struct inode *, uint16_t,
+ uint32_t, int *);
/* fsync.c */
extern int udf_fsync_file(struct file *, struct dentry *, int);
/* directory.c */
-extern struct fileIdentDesc * udf_fileident_read(struct inode *, loff_t *, struct udf_fileident_bh *, struct fileIdentDesc *, struct extent_position *, kernel_lb_addr *, uint32_t *, sector_t *);
-extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
-extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
-extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
+extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *,
+ struct udf_fileident_bh *,
+ struct fileIdentDesc *,
+ struct extent_position *,
+ kernel_lb_addr *, uint32_t *,
+ sector_t *);
+extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
+ int *offset);
+extern long_ad *udf_get_filelongad(uint8_t *, int, int *, int);
+extern short_ad *udf_get_fileshortad(uint8_t *, int, int *, int);
/* crc.c */
extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
@@ -171,4 +195,4 @@ extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp);
extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec);
-#endif /* __UDF_DECL_H */
+#endif /* __UDF_DECL_H */
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index 17d3788..c4bd120 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -7,74 +7,92 @@
static inline kernel_lb_addr lelb_to_cpu(lb_addr in)
{
kernel_lb_addr out;
+
out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum);
out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum);
+
return out;
}
static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
{
lb_addr out;
+
out.logicalBlockNum = cpu_to_le32(in.logicalBlockNum);
out.partitionReferenceNum = cpu_to_le16(in.partitionReferenceNum);
+
return out;
}
static inline kernel_timestamp lets_to_cpu(timestamp in)
{
kernel_timestamp out;
+
memcpy(&out, &in, sizeof(timestamp));
out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
out.year = le16_to_cpu(in.year);
+
return out;
}
static inline short_ad lesa_to_cpu(short_ad in)
{
short_ad out;
+
out.extLength = le32_to_cpu(in.extLength);
out.extPosition = le32_to_cpu(in.extPosition);
+
return out;
}
static inline short_ad cpu_to_lesa(short_ad in)
{
short_ad out;
+
out.extLength = cpu_to_le32(in.extLength);
out.extPosition = cpu_to_le32(in.extPosition);
+
return out;
}
static inline kernel_long_ad lela_to_cpu(long_ad in)
{
kernel_long_ad out;
+
out.extLength = le32_to_cpu(in.extLength);
out.extLocation = lelb_to_cpu(in.extLocation);
+
return out;
}
static inline long_ad cpu_to_lela(kernel_long_ad in)
{
long_ad out;
+
out.extLength = cpu_to_le32(in.extLength);
out.extLocation = cpu_to_lelb(in.extLocation);
+
return out;
}
static inline kernel_extent_ad leea_to_cpu(extent_ad in)
{
kernel_extent_ad out;
+
out.extLength = le32_to_cpu(in.extLength);
out.extLocation = le32_to_cpu(in.extLocation);
+
return out;
}
static inline timestamp cpu_to_lets(kernel_timestamp in)
{
timestamp out;
+
memcpy(&out, &in, sizeof(timestamp));
out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
out.year = cpu_to_le16(in.year);
+
return out;
}
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 85d8dbe..3fd80eb 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -18,18 +18,18 @@
Boston, MA 02111-1307, USA. */
/*
- * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time
+ * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time
* 10/04/98: added new table-based lookup after seeing how ugly the gnu code is
* blf 09/27/99: ripped out all the old code and inserted new table from
- * John Brockmeyer (without leap second corrections)
- * rewrote udf_stamp_to_time and fixed timezone accounting in
- udf_time_to_stamp.
+ * John Brockmeyer (without leap second corrections)
+ * rewrote udf_stamp_to_time and fixed timezone accounting in
+ * udf_time_to_stamp.
*/
/*
* We don't take into account leap seconds. This may be correct or incorrect.
* For more NIST information (especially dealing with leap seconds), see:
- * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
+ * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
*/
#include <linux/types.h>
@@ -46,36 +46,35 @@
#endif
/* How many days come before each month (0-12). */
-static const unsigned short int __mon_yday[2][13] =
-{
+static const unsigned short int __mon_yday[2][13] = {
/* Normal years. */
- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
/* Leap years. */
- { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+ {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
};
#define MAX_YEAR_SECONDS 69
-#define SPD 0x15180 /*3600*24*/
-#define SPY(y,l,s) (SPD * (365*y+l)+s)
+#define SPD 0x15180 /* 3600*24 */
+#define SPY(y,l,s) (SPD * (365*y+l)+s)
static time_t year_seconds[MAX_YEAR_SECONDS]= {
-/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0),
-/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0),
-/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0),
-/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0),
-/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0),
-/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0),
-/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0),
-/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0),
-/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0),
-/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0),
-/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0),
-/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0),
-/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0),
-/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0),
-/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0),
-/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0),
-/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0),
+/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0),
+/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0),
+/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0),
+/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0),
+/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0),
+/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0),
+/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0),
+/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0),
+/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0),
+/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0),
+/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0),
+/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0),
+/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0),
+/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0),
+/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0),
+/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0),
+/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0),
/*2038*/ SPY(68,17,0)
};
@@ -84,27 +83,24 @@ extern struct timezone sys_tz;
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-time_t *
-udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
+time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
{
int yday;
uint8_t type = src.typeAndTimezone >> 12;
int16_t offset;
- if (type == 1)
- {
+ if (type == 1) {
offset = src.typeAndTimezone << 4;
/* sign extent offset */
offset = (offset >> 4);
if (offset == -2047) /* unspecified offset */
offset = 0;
- }
- else
+ } else {
offset = 0;
+ }
if ((src.year < EPOCH_YEAR) ||
- (src.year >= EPOCH_YEAR+MAX_YEAR_SECONDS))
- {
+ (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
*dest = -1;
*dest_usec = -1;
return NULL;
@@ -113,15 +109,13 @@ udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
*dest -= offset * 60;
yday = ((__mon_yday[__isleap (src.year)]
- [src.month-1]) + (src.day-1));
- *dest += ( ( (yday* 24) + src.hour ) * 60 + src.minute ) * 60 + src.second;
+ [src.month - 1]) + (src.day - 1));
+ *dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
*dest_usec = src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 + src.microseconds;
return dest;
}
-
-kernel_timestamp *
-udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
+kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
{
long int days, rem, y;
const unsigned short int *ip;
@@ -146,19 +140,18 @@ udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
#define DIV(a,b) ((a) / (b) - ((a) % (b) < 0))
#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
- while (days < 0 || days >= (__isleap(y) ? 366 : 365))
- {
+ while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
long int yg = y + days / 365 - (days % 365 < 0);
/* Adjust DAYS and Y to match the guessed year. */
days -= ((yg - y) * 365
- + LEAPS_THRU_END_OF (yg - 1)
- - LEAPS_THRU_END_OF (y - 1));
+ + LEAPS_THRU_END_OF (yg - 1)
+ - LEAPS_THRU_END_OF (y - 1));
y = yg;
}
dest->year = y;
ip = __mon_yday[__isleap(y)];
- for (y = 11; days < (long int) ip[y]; --y)
+ for (y = 11; days < (long int)ip[y]; --y)
continue;
days -= ip[y];
dest->month = y + 1;
@@ -167,7 +160,7 @@ udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
dest->centiseconds = ts.tv_nsec / 10000000;
dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100;
dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
- dest->hundredsOfMicroseconds * 100);
+ dest->hundredsOfMicroseconds * 100);
return dest;
}
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 706c92e..9e6099c 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -31,12 +31,14 @@ static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
- if ( (!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN-2) )
+ if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
return 0;
+
memset(dest, 0, sizeof(struct ustr));
memcpy(dest->u_name, src, strlen);
dest->u_cmpID = 0x08;
dest->u_len = strlen;
+
return strlen;
}
@@ -47,14 +49,15 @@ int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
{
int usesize;
- if ( (!dest) || (!ptr) || (!size) )
+ if ((!dest) || (!ptr) || (!size))
return -1;
memset(dest, 0, sizeof(struct ustr));
- usesize= (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
- dest->u_cmpID=ptr[0];
- dest->u_len=ptr[size-1];
- memcpy(dest->u_name, ptr+1, usesize-1);
+ usesize = (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
+ dest->u_cmpID = ptr[0];
+ dest->u_len = ptr[size - 1];
+ memcpy(dest->u_name, ptr + 1, usesize - 1);
+
return 0;
}
@@ -63,13 +66,14 @@ int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
*/
static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
{
- if ( (!dest) || (!ptr) || (!exactsize) )
+ if ((!dest) || (!ptr) || (!exactsize))
return -1;
memset(dest, 0, sizeof(struct ustr));
- dest->u_cmpID=ptr[0];
- dest->u_len=exactsize-1;
- memcpy(dest->u_name, ptr+1, exactsize-1);
+ dest->u_cmpID = ptr[0];
+ dest->u_len = exactsize - 1;
+ memcpy(dest->u_name, ptr + 1, exactsize - 1);
+
return 0;
}
@@ -108,22 +112,20 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
cmp_id = ocu_i->u_cmpID;
utf_o->u_len = 0;
- if (ocu_len == 0)
- {
+ if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
utf_o->u_cmpID = 0;
utf_o->u_len = 0;
return 0;
}
- if ((cmp_id != 8) && (cmp_id != 16))
- {
- printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+ if ((cmp_id != 8) && (cmp_id != 16)) {
+ printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
+ cmp_id, ocu_i->u_name);
return 0;
}
- for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
- {
+ for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
c = ocu[i++];
@@ -131,21 +133,18 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
c = (c << 8) | ocu[i++];
/* Compress Unicode to UTF-8 */
- if (c < 0x80U)
+ if (c < 0x80U) {
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
- else if (c < 0x800U)
- {
+ } else if (c < 0x800U) {
utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
- }
- else
- {
+ } else {
utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12));
utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
}
}
- utf_o->u_cmpID=8;
+ utf_o->u_cmpID = 8;
return utf_o->u_len;
}
@@ -186,61 +185,46 @@ try_again:
u_len = 0U;
utf_char = 0U;
utf_cnt = 0U;
- for (i = 0U; i < utf->u_len; i++)
- {
+ for (i = 0U; i < utf->u_len; i++) {
c = (uint8_t)utf->u_name[i];
/* Complete a multi-byte UTF-8 character */
- if (utf_cnt)
- {
+ if (utf_cnt) {
utf_char = (utf_char << 6) | (c & 0x3fU);
if (--utf_cnt)
continue;
- }
- else
- {
+ } else {
/* Check for a multi-byte UTF-8 character */
- if (c & 0x80U)
- {
+ if (c & 0x80U) {
/* Start a multi-byte UTF-8 character */
- if ((c & 0xe0U) == 0xc0U)
- {
+ if ((c & 0xe0U) == 0xc0U) {
utf_char = c & 0x1fU;
utf_cnt = 1;
- }
- else if ((c & 0xf0U) == 0xe0U)
- {
+ } else if ((c & 0xf0U) == 0xe0U) {
utf_char = c & 0x0fU;
utf_cnt = 2;
- }
- else if ((c & 0xf8U) == 0xf0U)
- {
+ } else if ((c & 0xf8U) == 0xf0U) {
utf_char = c & 0x07U;
utf_cnt = 3;
- }
- else if ((c & 0xfcU) == 0xf8U)
- {
+ } else if ((c & 0xfcU) == 0xf8U) {
utf_char = c & 0x03U;
utf_cnt = 4;
- }
- else if ((c & 0xfeU) == 0xfcU)
- {
+ } else if ((c & 0xfeU) == 0xfcU) {
utf_char = c & 0x01U;
utf_cnt = 5;
- }
- else
+ } else {
goto error_out;
+ }
continue;
- } else
+ } else {
/* Single byte UTF-8 character (most common) */
utf_char = c;
+ }
}
/* Choose no compression if necessary */
- if (utf_char > max_val)
- {
- if ( 0xffU == max_val )
- {
+ if (utf_char > max_val) {
+ if (max_val == 0xffU) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
@@ -248,26 +232,25 @@ try_again:
goto error_out;
}
- if (max_val == 0xffffU)
- {
+ if (max_val == 0xffffU) {
ocu[++u_len] = (uint8_t)(utf_char >> 8);
}
ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
}
-
- if (utf_cnt)
- {
+ if (utf_cnt) {
error_out:
ocu[++u_len] = '?';
printk(KERN_DEBUG "udf: bad UTF-8 character\n");
}
ocu[length - 1] = (uint8_t)u_len + 1;
+
return u_len + 1;
}
-static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, struct ustr *ocu_i)
+static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
+ struct ustr *ocu_i)
{
uint8_t *ocu;
uint32_t c;
@@ -280,36 +263,35 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, struct ustr *
cmp_id = ocu_i->u_cmpID;
utf_o->u_len = 0;
- if (ocu_len == 0)
- {
+ if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
utf_o->u_cmpID = 0;
utf_o->u_len = 0;
return 0;
}
- if ((cmp_id != 8) && (cmp_id != 16))
- {
- printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", cmp_id, ocu_i->u_name);
+ if ((cmp_id != 8) && (cmp_id != 16)) {
+ printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
+ cmp_id, ocu_i->u_name);
return 0;
}
- for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN-3)) ;)
- {
+ for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
- utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
- UDF_NAME_LEN - utf_o->u_len);
+ utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+ UDF_NAME_LEN - utf_o->u_len);
}
- utf_o->u_cmpID=8;
+ utf_o->u_cmpID = 8;
return utf_o->u_len;
}
-static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, int length)
+static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
+ int length)
{
unsigned len, i, max_val;
uint16_t uni_char;
@@ -321,19 +303,17 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, i
try_again:
u_len = 0U;
- for (i = 0U; i < uni->u_len; i++)
- {
- len = nls->char2uni(&uni->u_name[i], uni->u_len-i, &uni_char);
+ for (i = 0U; i < uni->u_len; i++) {
+ len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
if (len <= 0)
continue;
- if (uni_char > max_val)
- {
+ if (uni_char > max_val) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
goto try_again;
}
-
+
if (max_val == 0xffffU)
ocu[++u_len] = (uint8_t)(uni_char >> 8);
ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
@@ -344,112 +324,98 @@ try_again:
return u_len + 1;
}
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
+ int flen)
{
struct ustr filename, unifilename;
int len;
- if (udf_build_ustr_exact(&unifilename, sname, flen))
- {
+ if (udf_build_ustr_exact(&unifilename, sname, flen)) {
return 0;
}
- if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
- {
- if (!udf_CS0toUTF8(&filename, &unifilename) )
- {
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+ if (!udf_CS0toUTF8(&filename, &unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
return 0;
}
- }
- else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
- {
- if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename) )
- {
+ } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
+ if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
return 0;
}
- }
- else
+ } else {
return 0;
+ }
- if ((len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
- unifilename.u_name, unifilename.u_len)))
- {
+ len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
+ unifilename.u_name, unifilename.u_len);
+ if (len) {
return len;
}
+
return 0;
}
-int udf_put_filename(struct super_block *sb, const uint8_t *sname, uint8_t *dname, int flen)
+int udf_put_filename(struct super_block *sb, const uint8_t *sname,
+ uint8_t *dname, int flen)
{
struct ustr unifilename;
int namelen;
- if ( !(udf_char_to_ustr(&unifilename, sname, flen)) )
- {
+ if (!(udf_char_to_ustr(&unifilename, sname, flen))) {
return 0;
}
- if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
- {
- if ( !(namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN)) )
- {
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
+ namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
+ if (!namelen) {
return 0;
}
- }
- else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
- {
- if ( !(namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN)) )
- {
+ } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
+ namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN);
+ if (!namelen) {
return 0;
}
- }
- else
+ } else {
return 0;
+ }
return namelen;
}
#define ILLEGAL_CHAR_MARK '_'
-#define EXT_MARK '.'
-#define CRC_MARK '#'
-#define EXT_SIZE 5
+#define EXT_MARK '.'
+#define CRC_MARK '#'
+#define EXT_SIZE 5
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen, uint8_t *fidName, int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen,
+ uint8_t *fidName, int fidNameLen)
{
- int index, newIndex = 0, needsCRC = 0;
+ int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
unsigned short valueCRC;
uint8_t curr;
const uint8_t hexChar[] = "0123456789ABCDEF";
- if (udfName[0] == '.' && (udfLen == 1 ||
- (udfLen == 2 && udfName[1] == '.')))
- {
+ if (udfName[0] == '.' &&
+ (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
needsCRC = 1;
newIndex = udfLen;
memcpy(newName, udfName, udfLen);
- }
- else
- {
- for (index = 0; index < udfLen; index++)
- {
+ } else {
+ for (index = 0; index < udfLen; index++) {
curr = udfName[index];
- if (curr == '/' || curr == 0)
- {
+ if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
- while (index+1 < udfLen && (udfName[index+1] == '/' ||
- udfName[index+1] == 0))
+ while (index + 1 < udfLen && (udfName[index + 1] == '/' ||
+ udfName[index + 1] == 0))
index++;
- }
- if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE)
- {
- if (udfLen == index + 1)
+ }
+ if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE) {
+ if (udfLen == index + 1) {
hasExt = 0;
- else
- {
+ } else {
hasExt = 1;
extIndex = index;
newExtIndex = newIndex;
@@ -461,26 +427,22 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
needsCRC = 1;
}
}
- if (needsCRC)
- {
+ if (needsCRC) {
uint8_t ext[EXT_SIZE];
int localExtIndex = 0;
- if (hasExt)
- {
+ if (hasExt) {
int maxFilenameLen;
- for(index = 0; index<EXT_SIZE && extIndex + index +1 < udfLen;
- index++ )
- {
+ for (index = 0; index < EXT_SIZE && extIndex + index + 1 < udfLen; index++) {
curr = udfName[extIndex + index + 1];
- if (curr == '/' || curr == 0)
- {
+ if (curr == '/' || curr == 0) {
needsCRC = 1;
curr = ILLEGAL_CHAR_MARK;
- while(extIndex + index + 2 < udfLen && (index + 1 < EXT_SIZE
- && (udfName[extIndex + index + 2] == '/' ||
- udfName[extIndex + index + 2] == 0)))
+ while (extIndex + index + 2 < udfLen &&
+ (index + 1 < EXT_SIZE
+ && (udfName[extIndex + index + 2] == '/' ||
+ udfName[extIndex + index + 2] == 0)))
index++;
}
ext[localExtIndex++] = curr;
@@ -490,9 +452,9 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
- }
- else if (newIndex > 250)
+ } else if (newIndex > 250) {
newIndex = 250;
+ }
newName[newIndex++] = CRC_MARK;
valueCRC = udf_crc(fidName, fidNameLen, 0);
newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
@@ -500,12 +462,12 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen
newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
- if (hasExt)
- {
+ if (hasExt) {
newName[newIndex++] = EXT_MARK;
- for (index = 0;index < localExtIndex ;index++ )
+ for (index = 0; index < localExtIndex; index++)
newName[newIndex++] = ext[index];
}
}
+
return newIndex;
}
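The translation above replaces illegal characters with ILLEGAL_CHAR_MARK and, when any substitution happened, appends CRC_MARK plus four hex digits of udf_crc(fidName) ahead of the preserved extension. For illustration only, a stand-alone sketch of that suffix construction; the CRC value and the file name are assumed for the example, not taken from the patch:

/* Stand-alone illustration of the mangled-name suffix built by
 * udf_translate_to_linux(). The CRC value is assumed here; in the
 * kernel it comes from udf_crc(fidName, fidNameLen, 0). */
#include <stdio.h>

int main(void)
{
	const char hexChar[] = "0123456789ABCDEF";
	unsigned short valueCRC = 0xBEEF;  /* assumed value, illustration only */
	char newName[32] = "bad_name";     /* "bad/name.txt" after ILLEGAL_CHAR_MARK */
	int newIndex = 8;

	newName[newIndex++] = '#';         /* CRC_MARK */
	newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
	newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
	newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
	newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
	newName[newIndex++] = '.';         /* EXT_MARK */
	newName[newIndex++] = 't';
	newName[newIndex++] = 'x';
	newName[newIndex++] = 't';
	newName[newIndex] = '\0';

	printf("%s\n", newName);           /* prints bad_name#BEEF.txt */
	return 0;
}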
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 2b30116..38eb0b7 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
goto again;
}
-
+ sbi->s_flags = flags; /* after this point some functions rely on s_flags */
ufs_print_super_stuff(sb, usb1, usb2, usb3);
/*
@@ -1025,8 +1025,6 @@ magic_found:
UFS_MOUNT_UFSTYPE_44BSD)
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
-
- sbi->s_flags = flags;
inode = iget(sb, UFS_ROOTINO);
if (!inode || is_bad_inode(inode))
@@ -1240,14 +1238,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
inode_init_once(&ei->vfs_inode);
}
-
+
static int init_inodecache(void)
{
ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
sizeof(struct ufs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
- init_once, NULL);
+ init_once);
if (ufs_inode_cachep == NULL)
return -ENOMEM;
return 0;
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 4b6470c..e6ea293 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -74,14 +74,14 @@ extern void kmem_free(void *, size_t);
static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
- return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+ return kmem_cache_create(zone_name, size, 0, 0, NULL);
}
static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
void (*construct)(void *, kmem_zone_t *, unsigned long))
{
- return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+ return kmem_cache_create(zone_name, size, 0, flags, construct);
}
static inline void
@@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
- return (gfp_mask & __GFP_WAIT);
+ return (gfp_mask & __GFP_WAIT) != 0;
}
#endif /* __XFS_SUPPORT_KMEM_H__ */
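The init_inodecache() hunk in ufs and both kmem_zone wrappers above track the same tree-wide API change: kmem_cache_create() no longer takes a trailing destructor argument and is down to five parameters. A minimal sketch of the post-change call, using a placeholder object type, cache name and constructor rather than the real ufs/XFS ones:

/* Sketch of the five-argument kmem_cache_create(); all example_* names
 * are placeholders for this illustration. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_object {
	int value;
};

static struct kmem_cache *example_cachep;

static void example_init_once(void *obj, struct kmem_cache *cachep,
			      unsigned long flags)
{
	/* one-time initialisation of a freshly allocated object */
}

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_object), 0,
					   SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					   example_init_once);  /* no destructor argument */
	return example_cachep ? 0 : -ENOMEM;
}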
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fd4105d..6f4c29e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -181,6 +181,7 @@ xfs_setfilesize(
ip->i_d.di_size = isize;
ip->i_update_core = 1;
ip->i_update_size = 1;
+ mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -322,17 +323,13 @@ xfs_iomap_valid(
/*
* BIO completion handler for buffered IO.
*/
-STATIC int
+STATIC void
xfs_end_bio(
struct bio *bio,
- unsigned int bytes_done,
int error)
{
xfs_ioend_t *ioend = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
ASSERT(atomic_read(&bio->bi_cnt) >= 1);
ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
@@ -342,7 +339,6 @@ xfs_end_bio(
bio_put(bio);
xfs_finish_ioend(ioend, 0);
- return 0;
}
STATIC void
@@ -652,7 +648,7 @@ xfs_probe_cluster(
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
- size_t pg_offset, len = 0;
+ size_t pg_offset, pg_len = 0;
if (tindex == tlast) {
pg_offset =
@@ -665,16 +661,16 @@ xfs_probe_cluster(
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && !TestSetPageLocked(page)) {
- len = xfs_probe_page(page, pg_offset, mapped);
+ pg_len = xfs_probe_page(page, pg_offset, mapped);
unlock_page(page);
}
- if (!len) {
+ if (!pg_len) {
done = 1;
break;
}
- total += len;
+ total += pg_len;
tindex++;
}
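The xfs_end_bio() change above (and xfs_buf_bio_end_io() below) follows the block-layer switch to completion handlers that are called exactly once, take no bytes_done count and return void, so the old "if (bio->bi_size) return 1;" partial-completion check disappears. A minimal sketch of a handler written against that convention; the handler name and the completion-based signalling are placeholders, not XFS code:

/* Sketch of a bi_end_io handler under the new convention: invoked once,
 * on final completion, with only the bio and an error code. */
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/kernel.h>

static void example_end_io(struct bio *bio, int error)
{
	struct completion *done = bio->bi_private;

	/* No partial-completion handling: bi_size bookkeeping is gone. */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		pr_debug("example: I/O failed with error %d\n", error);

	complete(done);
	bio_put(bio);
}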
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b0f0e58..39f44ee 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1103,19 +1103,15 @@ _xfs_buf_ioend(
}
}
-STATIC int
+STATIC void
xfs_buf_bio_end_io(
struct bio *bio,
- unsigned int bytes_done,
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- if (bio->bi_size)
- return 1;
-
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
bp->b_error = EIO;
@@ -1143,7 +1139,6 @@ xfs_buf_bio_end_io(
_xfs_buf_ioend(bp, 1);
bio_put(bio);
- return 0;
}
STATIC void
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index cbcd40c..0d4001e 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -212,19 +212,18 @@ xfs_file_fsync(
}
#ifdef CONFIG_XFS_DMAPI
-STATIC struct page *
-xfs_vm_nopage(
- struct vm_area_struct *area,
- unsigned long address,
- int *type)
+STATIC int
+xfs_vm_fault(
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
- struct inode *inode = area->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
bhv_vnode_t *vp = vn_from_inode(inode);
ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
- if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
- return NULL;
- return filemap_nopage(area, address, type);
+ if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
+ return VM_FAULT_SIGBUS;
+ return filemap_fault(vma, vmf);
}
#endif /* CONFIG_XFS_DMAPI */
@@ -310,6 +309,7 @@ xfs_file_mmap(
struct vm_area_struct *vma)
{
vma->vm_ops = &xfs_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
#ifdef CONFIG_XFS_DMAPI
if (vn_from_inode(filp->f_path.dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
@@ -413,6 +413,20 @@ xfs_file_open_exec(
}
#endif /* HAVE_FOP_OPEN_EXEC */
+/*
+ * An mmap()ed file has taken a write-protection fault and is being
+ * made writable. We can set the page state up correctly for a
+ * writable page, which means we can do correct delalloc accounting
+ * (ENOSPC checking!) and unwritten extent mapping.
+ */
+STATIC int
+xfs_vm_page_mkwrite(
+ struct vm_area_struct *vma,
+ struct page *page)
+{
+ return block_page_mkwrite(vma, page, xfs_get_blocks);
+}
+
const struct file_operations xfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -464,14 +478,14 @@ const struct file_operations xfs_dir_file_operations = {
};
static struct vm_operations_struct xfs_file_vm_ops = {
- .nopage = filemap_nopage,
- .populate = filemap_populate,
+ .fault = filemap_fault,
+ .page_mkwrite = xfs_vm_page_mkwrite,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
- .nopage = xfs_vm_nopage,
- .populate = filemap_populate,
+ .fault = xfs_vm_fault,
+ .page_mkwrite = xfs_vm_page_mkwrite,
#ifdef HAVE_VMOP_MPROTECT
.mprotect = xfs_vm_mprotect,
#endif
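The xfs_file.c hunks convert the mapping path from ->nopage/->populate to the ->fault interface and wire up ->page_mkwrite so delayed-allocation state is reserved (and ENOSPC reported) when an mmap()ed page first goes writable. A condensed sketch of that pattern; example_get_block() is a placeholder with the get_block_t prototype, standing in for xfs_get_blocks:

/* Sketch of a fault-based vm_operations_struct plus page_mkwrite, as
 * used in this series. All example_* names are placeholders. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

extern int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* Reserve blocks/delalloc state now, so the later write cannot fail. */
	return block_page_mkwrite(vma, page, example_get_block);
}

static struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,	/* replaces .nopage/.populate */
	.page_mkwrite	= example_page_mkwrite,
};

static int example_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;	/* needed once .populate is gone */
	return 0;
}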
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index bb72c3d..81565de 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -46,7 +46,7 @@ xfs_param_t xfs_params = {
.inherit_nosym = { 0, 0, 1 },
.rotorstep = { 1, 1, 255 },
.inherit_nodfrg = { 0, 1, 1 },
- .fstrm_timer = { 1, 50, 3600*100},
+ .fstrm_timer = { 1, 30*100, 3600*100},
};
/*
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 141cf15..42319d7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -139,7 +139,7 @@ STATIC int xfs_inumbers_fmt_compat(
long count,
long *written)
{
- compat_xfs_inogrp_t *p32 = ubuffer;
+ compat_xfs_inogrp_t __user *p32 = ubuffer;
long i;
for (i = 0; i < count; i++) {
@@ -444,7 +444,7 @@ xfs_compat_ioctl(
case XFS_IOC_FSINUMBERS_32:
cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
return xfs_ioc_bulkstat_compat(XFS_BHVTOI(VNHEAD(vp))->i_mount,
- cmd, (void*)arg);
+ cmd, (void __user*)arg);
case XFS_IOC_FD_TO_HANDLE_32:
case XFS_IOC_PATH_TO_HANDLE_32:
case XFS_IOC_PATH_TO_FSHANDLE_32:
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4528f9a..491d1f4 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -415,8 +415,10 @@ xfs_fs_write_inode(
if (vp) {
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- if (sync)
+ if (sync) {
+ filemap_fdatawait(inode->i_mapping);
flags |= FLUSH_SYNC;
+ }
error = bhv_vop_iflush(vp, flags);
if (error == EAGAIN)
error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d274b2..6ff0f4d 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -120,7 +120,8 @@ xfs_Gqm_init(void)
* Initialize the dquot hash tables.
*/
udqhash = kmem_zalloc_greedy(&hsize,
- XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+ XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
+ XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
KM_SLEEP | KM_MAYFAIL | KM_LARGE);
gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
hsize /= sizeof(xfs_dqhash_t);
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index a27a7c8..855da04 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -34,10 +34,10 @@ extern void cmn_err(int, char *, ...)
extern void assfail(char *expr, char *f, int l);
#define ASSERT_ALWAYS(expr) \
- (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef DEBUG
-# define ASSERT(expr) ((void)0)
+#define ASSERT(expr) ((void)0)
#ifndef STATIC
# define STATIC static noinline
@@ -49,8 +49,10 @@ extern void assfail(char *expr, char *f, int l);
#else /* DEBUG */
-# define ASSERT(expr) ASSERT_ALWAYS(expr)
-# include <linux/random.h>
+#include <linux/random.h>
+
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC noinline
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index aea37df..26d09e2 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1975,7 +1975,6 @@ xfs_da_do_buf(
error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- int i;
cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
(long long)bno);
cmn_err(CE_ALERT, "dir: inode %lld\n",
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce22786..36d8f6a 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
void
xfs_fstrm_free_func(
- xfs_ino_t ino,
- fstrm_item_t *item)
+ unsigned long ino,
+ void *data)
{
+ fstrm_item_t *item = (fstrm_item_t *)data;
xfs_inode_t *ip = item->ip;
int ref;
@@ -438,7 +439,7 @@ xfs_filestream_mount(
grp_count = 10;
err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
- (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
+ xfs_fstrm_free_func);
return err;
}
@@ -467,8 +468,7 @@ void
xfs_filestream_flush(
xfs_mount_t *mp)
{
- /* point in time flush, so keep the reaper running */
- xfs_mru_cache_flush(mp->m_filestream, 1);
+ xfs_mru_cache_flush(mp->m_filestream);
}
/*
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9d4c4fb..9bfb69e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2185,13 +2185,13 @@ xlog_state_do_callback(
}
cb = iclog->ic_callback;
- while (cb != 0) {
+ while (cb) {
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_callback = NULL;
LOG_UNLOCK(log, s);
/* perform callbacks in the order given */
- for (; cb != 0; cb = cb_next) {
+ for (; cb; cb = cb_next) {
cb_next = cb->cb_next;
cb->cb_func(cb->cb_arg, aborted);
}
@@ -2202,7 +2202,7 @@ xlog_state_do_callback(
loopdidcallbacks++;
funcdidcallbacks++;
- ASSERT(iclog->ic_callback == 0);
+ ASSERT(iclog->ic_callback == NULL);
if (!(iclog->ic_state & XLOG_STATE_IOERROR))
iclog->ic_state = XLOG_STATE_DIRTY;
@@ -3242,10 +3242,10 @@ xlog_ticket_put(xlog_t *log,
#else
/* When we debug, it is easier if tickets are cycled */
ticket->t_next = NULL;
- if (log->l_tail != 0) {
+ if (log->l_tail) {
log->l_tail->t_next = ticket;
} else {
- ASSERT(log->l_freelist == 0);
+ ASSERT(log->l_freelist == NULL);
log->l_freelist = ticket;
}
log->l_tail = ticket;
@@ -3463,7 +3463,7 @@ xlog_verify_iclog(xlog_t *log,
s = LOG_LOCK(log);
icptr = log->l_iclog;
for (i=0; i < log->l_iclog_bufs; i++) {
- if (icptr == 0)
+ if (icptr == NULL)
xlog_panic("xlog_verify_iclog: invalid ptr");
icptr = icptr->ic_next;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fddbb09..8ae6e8e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1366,7 +1366,7 @@ xlog_recover_add_to_cont_trans(
int old_len;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1412,7 +1412,7 @@ xlog_recover_add_to_trans(
if (!len)
return 0;
item = trans->r_itemq;
- if (item == 0) {
+ if (item == NULL) {
ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
if (len == sizeof(xfs_trans_header_t))
xlog_recover_add_item(&trans->r_itemq);
@@ -1467,12 +1467,12 @@ xlog_recover_unlink_tid(
xlog_recover_t *tp;
int found = 0;
- ASSERT(trans != 0);
+ ASSERT(trans != NULL);
if (trans == *q) {
*q = (*q)->r_next;
} else {
tp = *q;
- while (tp != 0) {
+ while (tp) {
if (tp->r_next == trans) {
found = 1;
break;
@@ -1495,7 +1495,7 @@ xlog_recover_insert_item_backq(
xlog_recover_item_t **q,
xlog_recover_item_t *item)
{
- if (*q == 0) {
+ if (*q == NULL) {
item->ri_prev = item->ri_next = item;
*q = item;
} else {
@@ -1899,7 +1899,7 @@ xlog_recover_do_reg_buffer(
break;
nbits = xfs_contig_bits(data_map, map_size, bit);
ASSERT(nbits > 0);
- ASSERT(item->ri_buf[i].i_addr != 0);
+ ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
ASSERT(XFS_BUF_COUNT(bp) >=
((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3..e0b358c 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
*/
if (!_xfs_mru_cache_migrate(mru, now)) {
mru->time_zero = now;
- if (!mru->next_reap)
- mru->next_reap = mru->grp_count * mru->grp_time;
+ if (!mru->queued) {
+ mru->queued = 1;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+ mru->grp_count * mru->grp_time);
+ }
} else {
grp = (now - mru->time_zero) / mru->grp_time;
grp = (mru->lru_grp + grp) % mru->grp_count;
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
struct work_struct *work)
{
xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
- unsigned long now;
+ unsigned long now, next;
ASSERT(mru && mru->lists);
if (!mru || !mru->lists)
return;
mutex_spinlock(&mru->lock);
- now = jiffies;
- if (mru->reap_all ||
- (mru->next_reap && time_after(now, mru->next_reap))) {
- if (mru->reap_all)
- now += mru->grp_count * mru->grp_time * 2;
- mru->next_reap = _xfs_mru_cache_migrate(mru, now);
- _xfs_mru_cache_clear_reap_list(mru);
+ next = _xfs_mru_cache_migrate(mru, jiffies);
+ _xfs_mru_cache_clear_reap_list(mru);
+
+ mru->queued = next;
+ if (mru->queued > 0) {
+ now = jiffies;
+ if (next <= now)
+ next = 0;
+ else
+ next -= now;
+ queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
}
- /*
- * the process that triggered the reap_all is responsible
- * for restating the periodic reap if it is required.
- */
- if (!mru->reap_all)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
- mru->reap_all = 0;
mutex_spinunlock(&mru->lock, 0);
}
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
/* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1;
- mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+ mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
if (!mru->lists) {
err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
mru->grp_time = grp_time;
mru->free_func = free_func;
- /* start up the reaper event */
- mru->next_reap = 0;
- mru->reap_all = 0;
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
*mrup = mru;
exit:
@@ -394,35 +389,25 @@ exit:
* Call xfs_mru_cache_flush() to flush out all cached entries, calling their
* free functions as they're deleted. When this function returns, the caller is
* guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
*/
void
xfs_mru_cache_flush(
- xfs_mru_cache_t *mru,
- int restart)
+ xfs_mru_cache_t *mru)
{
if (!mru || !mru->lists)
return;
- cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
mutex_spinlock(&mru->lock);
- mru->reap_all = 1;
- mutex_spinunlock(&mru->lock, 0);
+ if (mru->queued) {
+ mutex_spinunlock(&mru->lock, 0);
+ cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+ mutex_spinlock(&mru->lock);
+ }
- queue_work(xfs_mru_reap_wq, &mru->work.work);
- flush_workqueue(xfs_mru_reap_wq);
+ _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+ _xfs_mru_cache_clear_reap_list(mru);
- mutex_spinlock(&mru->lock);
- WARN_ON_ONCE(mru->reap_all != 0);
- mru->reap_all = 0;
- if (restart)
- queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
mutex_spinunlock(&mru->lock, 0);
}
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
if (!mru || !mru->lists)
return;
- /* we don't want the reaper to restart here */
- xfs_mru_cache_flush(mru, 0);
+ xfs_mru_cache_flush(mru);
kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
kmem_free(mru, sizeof(*mru));
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10..dd58ea1 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
unsigned int grp_time; /* Time period spanned by grps. */
unsigned int lru_grp; /* Group containing time zero. */
unsigned long time_zero; /* Time first element was added. */
- unsigned long next_reap; /* Time that the reaper should
- next do something. */
- unsigned int reap_all; /* if set, reap all lists */
xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
struct delayed_work work; /* Workqueue data for reaping. */
+ unsigned int queued; /* work has been queued */
} xfs_mru_cache_t;
int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
unsigned int grp_count,
xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
+void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void *value);
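Taken together, the xfs_mru_cache changes replace the always-running reaper (next_reap/reap_all) with lazy arming: the delayed work is queued only when the first element is inserted, re-queues itself from _xfs_mru_cache_reap() only while entries remain, and flush/destroy simply cancel and drain it. A generic sketch of that self-arming delayed-work pattern; the names, lock and one-second period are placeholders rather than the XFS types:

/* Sketch of a lazily armed, self-rearming delayed work item, mirroring
 * the 'queued' flag introduced above. All names are placeholders. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_work;
static DEFINE_SPINLOCK(example_lock);
static int example_queued;		/* mirrors mru->queued */
static int example_pending_items;

static void example_reap(struct work_struct *work)
{
	spin_lock(&example_lock);
	/* ... expire old entries, decrementing example_pending_items ... */
	example_queued = example_pending_items;	/* re-arm only if work remains */
	if (example_queued)
		queue_delayed_work(example_wq, &example_work, HZ);
	spin_unlock(&example_lock);
}

static int __init example_init(void)
{
	example_wq = create_workqueue("example_reap");
	if (!example_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&example_work, example_reap);
	return 0;
}

static void example_insert(void)
{
	spin_lock(&example_lock);
	example_pending_items++;
	if (!example_queued) {			/* first element arms the reaper */
		example_queued = 1;
		queue_delayed_work(example_wq, &example_work, HZ);
	}
	spin_unlock(&example_lock);
}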
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 79b5227..6034592 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -589,7 +589,30 @@ xfs_setattr(
code = xfs_igrow_start(ip, vap->va_size, credp);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- vn_iowait(vp); /* wait for the completion of any pending DIOs */
+
+ /*
+ * We are going to log the inode size change in this transaction,
+ * so any previous writes that lie between the on-disk EOF and the
+ * new EOF and have not yet been written out need to be written
+ * here. If we do not write the data out, we expose ourselves to
+ * the null files problem.
+ *
+ * Only flush from the on-disk size to the smaller of the in-memory
+ * file size or the new size, as that is the range we really care
+ * about and it avoids waiting on data outside that range.
+ */
+ if (!code &&
+ (ip->i_size != ip->i_d.di_size) &&
+ (vap->va_size > ip->i_d.di_size)) {
+ code = bhv_vop_flush_pages(XFS_ITOV(ip),
+ ip->i_d.di_size, vap->va_size,
+ XFS_B_ASYNC, FI_NONE);
+ }
+
+ /* wait for all I/O to complete */
+ vn_iowait(vp);
+
if (!code)
code = xfs_itruncate_data(ip, vap->va_size);
if (code) {
@@ -1059,6 +1082,9 @@ xfs_fsync(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
+ if (flag & FSYNC_DATA)
+ filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+
/*
* We always need to make sure that the required inode state
* is safe on disk. The vnode might be clean but because
@@ -3746,12 +3772,16 @@ xfs_inode_flush(
sync_lsn = log->l_last_sync_lsn;
GRANT_UNLOCK(log, s);
- if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
- return 0;
+ if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) {
+ if (flags & FLUSH_SYNC)
+ log_flags |= XFS_LOG_SYNC;
+ error = xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+ if (error)
+ return error;
+ }
- if (flags & FLUSH_SYNC)
- log_flags |= XFS_LOG_SYNC;
- return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
+ if (ip->i_update_core == 0)
+ return 0;
}
}
@@ -3765,9 +3795,6 @@ xfs_inode_flush(
if (flags & FLUSH_INODE) {
int flush_flags;
- if (xfs_ipincount(ip))
- return EAGAIN;
-
if (flags & FLUSH_SYNC) {
xfs_ilock(ip, XFS_ILOCK_SHARED);
xfs_iflock(ip);
@@ -4434,9 +4461,12 @@ xfs_free_file_space(
while (!error && !done) {
/*
- * allocate and setup the transaction
+ * allocate and setup the transaction. Allow this
+ * transaction to dip into the reserve blocks to ensure
+ * the freeing of the space succeeds at ENOSPC.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp,
resblks,
XFS_WRITE_LOG_RES(mp),