author     Jiri Kosina <jkosina@suse.cz>  2011-04-26 10:22:15 +0200
committer  Jiri Kosina <jkosina@suse.cz>  2011-04-26 10:22:59 +0200
commit     07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree       0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /fs
parent     9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent     cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/acl.c | 6
-rw-r--r--  fs/9p/fid.c | 4
-rw-r--r--  fs/9p/v9fs.h | 7
-rw-r--r--  fs/9p/vfs_dentry.c | 4
-rw-r--r--  fs/9p/vfs_file.c | 13
-rw-r--r--  fs/9p/vfs_inode.c | 29
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 24
-rw-r--r--  fs/9p/vfs_super.c | 82
-rw-r--r--  fs/adfs/adfs.h | 25
-rw-r--r--  fs/adfs/dir_f.c | 23
-rw-r--r--  fs/adfs/dir_fplus.c | 119
-rw-r--r--  fs/adfs/inode.c | 63
-rw-r--r--  fs/adfs/map.c | 2
-rw-r--r--  fs/adfs/super.c | 23
-rw-r--r--  fs/affs/Makefile | 2
-rw-r--r--  fs/affs/file.c | 2
-rw-r--r--  fs/afs/cache.c | 12
-rw-r--r--  fs/afs/cell.c | 2
-rw-r--r--  fs/aio.c | 81
-rw-r--r--  fs/attr.c | 6
-rw-r--r--  fs/autofs4/autofs_i.h | 2
-rw-r--r--  fs/autofs4/dev-ioctl.c | 4
-rw-r--r--  fs/autofs4/expire.c | 84
-rw-r--r--  fs/autofs4/root.c | 64
-rw-r--r--  fs/autofs4/waitq.c | 6
-rw-r--r--  fs/befs/ChangeLog | 10
-rw-r--r--  fs/befs/befs_fs_types.h | 2
-rw-r--r--  fs/befs/btree.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 3
-rw-r--r--  fs/bfs/dir.c | 2
-rw-r--r--  fs/bfs/file.c | 1
-rw-r--r--  fs/binfmt_elf.c | 10
-rw-r--r--  fs/binfmt_flat.c | 2
-rw-r--r--  fs/bio-integrity.c | 3
-rw-r--r--  fs/bio.c | 14
-rw-r--r--  fs/block_dev.c | 35
-rw-r--r--  fs/btrfs/acl.c | 11
-rw-r--r--  fs/btrfs/btrfs_inode.h | 3
-rw-r--r--  fs/btrfs/compression.c | 17
-rw-r--r--  fs/btrfs/ctree.c | 159
-rw-r--r--  fs/btrfs/ctree.h | 32
-rw-r--r--  fs/btrfs/delayed-ref.c | 6
-rw-r--r--  fs/btrfs/dir-item.c | 45
-rw-r--r--  fs/btrfs/disk-io.c | 217
-rw-r--r--  fs/btrfs/extent-tree.c | 354
-rw-r--r--  fs/btrfs/extent_io.c | 87
-rw-r--r--  fs/btrfs/extent_io.h | 3
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/file-item.c | 5
-rw-r--r--  fs/btrfs/file.c | 391
-rw-r--r--  fs/btrfs/free-space-cache.c | 713
-rw-r--r--  fs/btrfs/free-space-cache.h | 2
-rw-r--r--  fs/btrfs/inode-map.c | 3
-rw-r--r--  fs/btrfs/inode.c | 557
-rw-r--r--  fs/btrfs/ioctl.c | 112
-rw-r--r--  fs/btrfs/ordered-data.c | 8
-rw-r--r--  fs/btrfs/relocation.c | 10
-rw-r--r--  fs/btrfs/root-tree.c | 24
-rw-r--r--  fs/btrfs/super.c | 66
-rw-r--r--  fs/btrfs/transaction.c | 64
-rw-r--r--  fs/btrfs/transaction.h | 4
-rw-r--r--  fs/btrfs/tree-log.c | 57
-rw-r--r--  fs/btrfs/volumes.c | 235
-rw-r--r--  fs/btrfs/volumes.h | 12
-rw-r--r--  fs/btrfs/xattr.c | 35
-rw-r--r--  fs/btrfs/zlib.c | 3
-rw-r--r--  fs/buffer.c | 53
-rw-r--r--  fs/cachefiles/interface.c | 2
-rw-r--r--  fs/ceph/addr.c | 4
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/debugfs.c | 6
-rw-r--r--  fs/ceph/dir.c | 24
-rw-r--r--  fs/ceph/file.c | 10
-rw-r--r--  fs/ceph/inode.c | 25
-rw-r--r--  fs/ceph/mds_client.c | 6
-rw-r--r--  fs/ceph/snap.c | 6
-rw-r--r--  fs/ceph/super.c | 11
-rw-r--r--  fs/ceph/super.h | 66
-rw-r--r--  fs/cifs/AUTHORS | 2
-rw-r--r--  fs/cifs/README | 16
-rw-r--r--  fs/cifs/cache.c | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 43
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 2
-rw-r--r--  fs/cifs/cifs_spnego.c | 4
-rw-r--r--  fs/cifs/cifs_unicode.c | 35
-rw-r--r--  fs/cifs/cifs_unicode.h | 2
-rw-r--r--  fs/cifs/cifsencrypt.c | 21
-rw-r--r--  fs/cifs/cifsfs.c | 6
-rw-r--r--  fs/cifs/cifsglob.h | 13
-rw-r--r--  fs/cifs/cifssmb.c | 16
-rw-r--r--  fs/cifs/connect.c | 72
-rw-r--r--  fs/cifs/dir.c | 2
-rw-r--r--  fs/cifs/file.c | 100
-rw-r--r--  fs/cifs/link.c | 4
-rw-r--r--  fs/cifs/misc.c | 3
-rw-r--r--  fs/cifs/sess.c | 23
-rw-r--r--  fs/coda/Makefile | 2
-rw-r--r--  fs/coda/sysctl.c | 17
-rw-r--r--  fs/compat.c | 3
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/dcache.c | 89
-rw-r--r--  fs/devpts/inode.c | 21
-rw-r--r--  fs/direct-io.c | 7
-rw-r--r--  fs/dlm/lock.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 2
-rw-r--r--  fs/dlm/recover.c | 2
-rw-r--r--  fs/drop_caches.c | 24
-rw-r--r--  fs/ecryptfs/crypto.c | 23
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 37
-rw-r--r--  fs/ecryptfs/file.c | 34
-rw-r--r--  fs/ecryptfs/inode.c | 84
-rw-r--r--  fs/ecryptfs/keystore.c | 272
-rw-r--r--  fs/ecryptfs/kthread.c | 6
-rw-r--r--  fs/ecryptfs/main.c | 82
-rw-r--r--  fs/ecryptfs/mmap.c | 61
-rw-r--r--  fs/ecryptfs/read_write.c | 12
-rw-r--r--  fs/ecryptfs/super.c | 17
-rw-r--r--  fs/efs/inode.c | 1
-rw-r--r--  fs/eventpoll.c | 60
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/exofs/common.h | 22
-rw-r--r--  fs/exofs/dir.c | 33
-rw-r--r--  fs/exofs/exofs.h | 6
-rw-r--r--  fs/exofs/file.c | 16
-rw-r--r--  fs/exofs/inode.c | 52
-rw-r--r--  fs/exofs/super.c | 190
-rw-r--r--  fs/ext2/acl.c | 2
-rw-r--r--  fs/ext2/balloc.c | 6
-rw-r--r--  fs/ext2/ext2.h | 6
-rw-r--r--  fs/ext2/inode.c | 10
-rw-r--r--  fs/ext2/ioctl.c | 6
-rw-r--r--  fs/ext2/super.c | 2
-rw-r--r--  fs/ext2/xattr.c | 2
-rw-r--r--  fs/ext3/acl.c | 2
-rw-r--r--  fs/ext3/balloc.c | 10
-rw-r--r--  fs/ext3/inode.c | 11
-rw-r--r--  fs/ext3/ioctl.c | 6
-rw-r--r--  fs/ext3/resize.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/ext4/acl.c | 2
-rw-r--r--  fs/ext4/balloc.c | 5
-rw-r--r--  fs/ext4/ext4.h | 12
-rw-r--r--  fs/ext4/ext4_jbd2.h | 11
-rw-r--r--  fs/ext4/extents.c | 223
-rw-r--r--  fs/ext4/fsync.c | 33
-rw-r--r--  fs/ext4/ialloc.c | 8
-rw-r--r--  fs/ext4/inode.c | 467
-rw-r--r--  fs/ext4/ioctl.c | 15
-rw-r--r--  fs/ext4/mballoc.c | 36
-rw-r--r--  fs/ext4/mballoc.h | 2
-rw-r--r--  fs/ext4/migrate.c | 12
-rw-r--r--  fs/ext4/namei.c | 13
-rw-r--r--  fs/ext4/page-io.c | 16
-rw-r--r--  fs/ext4/resize.c | 12
-rw-r--r--  fs/ext4/super.c | 126
-rw-r--r--  fs/ext4/xattr.c | 4
-rw-r--r--  fs/fat/inode.c | 1
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/fhandle.c | 1
-rw-r--r--  fs/fifo.c | 3
-rw-r--r--  fs/filesystems.c | 3
-rw-r--r--  fs/freevxfs/vxfs_fshead.c | 2
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 2
-rw-r--r--  fs/freevxfs/vxfs_olt.h | 2
-rw-r--r--  fs/freevxfs/vxfs_subr.c | 1
-rw-r--r--  fs/fs-writeback.c | 143
-rw-r--r--  fs/fuse/cuse.c | 12
-rw-r--r--  fs/fuse/dev.c | 27
-rw-r--r--  fs/fuse/dir.c | 38
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/fuse/inode.c | 1
-rw-r--r--  fs/generic_acl.c | 2
-rw-r--r--  fs/gfs2/Makefile | 2
-rw-r--r--  fs/gfs2/aops.c | 5
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/dir.c | 2
-rw-r--r--  fs/gfs2/file.c | 60
-rw-r--r--  fs/gfs2/glock.c | 8
-rw-r--r--  fs/gfs2/glops.c | 4
-rw-r--r--  fs/gfs2/inode.c | 56
-rw-r--r--  fs/gfs2/inode.h | 3
-rw-r--r--  fs/gfs2/log.c | 4
-rw-r--r--  fs/gfs2/lops.c | 12
-rw-r--r--  fs/gfs2/meta_io.c | 3
-rw-r--r--  fs/gfs2/ops_fstype.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 4
-rw-r--r--  fs/gfs2/super.c | 16
-rw-r--r--  fs/hfs/inode.c | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hfsplus/ioctl.c | 2
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 3
-rw-r--r--  fs/inode.c | 687
-rw-r--r--  fs/internal.h | 8
-rw-r--r--  fs/ioctl.c | 21
-rw-r--r--  fs/isofs/inode.c | 1
-rw-r--r--  fs/jbd/commit.c | 24
-rw-r--r--  fs/jbd/journal.c | 4
-rw-r--r--  fs/jbd/revoke.c | 2
-rw-r--r--  fs/jbd/transaction.c | 2
-rw-r--r--  fs/jbd2/commit.c | 28
-rw-r--r--  fs/jbd2/journal.c | 7
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 2
-rw-r--r--  fs/jffs2/TODO | 2
-rw-r--r--  fs/jffs2/acl.c | 2
-rw-r--r--  fs/jffs2/compr_zlib.c | 7
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/summary.c | 4
-rw-r--r--  fs/jffs2/wbuf.c | 2
-rw-r--r--  fs/jffs2/xattr.c | 2
-rw-r--r--  fs/jfs/Makefile | 2
-rw-r--r--  fs/jfs/inode.c | 1
-rw-r--r--  fs/jfs/ioctl.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 4
-rw-r--r--  fs/jfs/jfs_extent.c | 6
-rw-r--r--  fs/jfs/jfs_imap.c | 14
-rw-r--r--  fs/jfs/jfs_logmgr.h | 2
-rw-r--r--  fs/jfs/jfs_metapage.c | 1
-rw-r--r--  fs/jfs/jfs_metapage.h | 2
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 2
-rw-r--r--  fs/jfs/resize.c | 4
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/jfs/xattr.c | 2
-rw-r--r--  fs/locks.c | 12
-rw-r--r--  fs/logfs/compr.c | 2
-rw-r--r--  fs/logfs/dev_bdev.c | 2
-rw-r--r--  fs/logfs/dev_mtd.c | 2
-rw-r--r--  fs/logfs/dir.c | 2
-rw-r--r--  fs/logfs/file.c | 2
-rw-r--r--  fs/logfs/inode.c | 2
-rw-r--r--  fs/logfs/readwrite.c | 2
-rw-r--r--  fs/mbcache.c | 2
-rw-r--r--  fs/minix/Kconfig | 8
-rw-r--r--  fs/minix/inode.c | 1
-rw-r--r--  fs/minix/minix.h | 74
-rw-r--r--  fs/mpage.c | 8
-rw-r--r--  fs/namei.c | 56
-rw-r--r--  fs/namespace.c | 18
-rw-r--r--  fs/ncpfs/Makefile | 2
-rw-r--r--  fs/ncpfs/inode.c | 2
-rw-r--r--  fs/nfs/callback_xdr.c | 2
-rw-r--r--  fs/nfs/dir.c | 89
-rw-r--r--  fs/nfs/file.c | 5
-rw-r--r--  fs/nfs/getroot.c | 4
-rw-r--r--  fs/nfs/inode.c | 10
-rw-r--r--  fs/nfs/internal.h | 27
-rw-r--r--  fs/nfs/namespace.c | 113
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 5
-rw-r--r--  fs/nfs/nfs4filelayout.c | 352
-rw-r--r--  fs/nfs/nfs4filelayout.h | 4
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c | 178
-rw-r--r--  fs/nfs/nfs4proc.c | 300
-rw-r--r--  fs/nfs/nfs4state.c | 3
-rw-r--r--  fs/nfs/nfs4xdr.c | 313
-rw-r--r--  fs/nfs/pagelist.c | 12
-rw-r--r--  fs/nfs/pnfs.c | 142
-rw-r--r--  fs/nfs/pnfs.h | 83
-rw-r--r--  fs/nfs/proc.c | 2
-rw-r--r--  fs/nfs/write.c | 233
-rw-r--r--  fs/nfs_common/nfsacl.c | 3
-rw-r--r--  fs/nfsd/export.c | 1
-rw-r--r--  fs/nfsd/lockd.c | 1
-rw-r--r--  fs/nfsd/nfs3xdr.c | 2
-rw-r--r--  fs/nfsd/nfs4idmap.c | 1
-rw-r--r--  fs/nfsd/nfs4proc.c | 4
-rw-r--r--  fs/nfsd/nfs4state.c | 170
-rw-r--r--  fs/nfsd/nfs4xdr.c | 5
-rw-r--r--  fs/nfsd/nfsctl.c | 35
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nfsd/state.h | 12
-rw-r--r--  fs/nfsd/vfs.c | 11
-rw-r--r--  fs/nilfs2/alloc.c | 12
-rw-r--r--  fs/nilfs2/alloc.h | 2
-rw-r--r--  fs/nilfs2/bmap.c | 12
-rw-r--r--  fs/nilfs2/bmap.h | 3
-rw-r--r--  fs/nilfs2/btnode.c | 7
-rw-r--r--  fs/nilfs2/btree.c | 6
-rw-r--r--  fs/nilfs2/dir.c | 5
-rw-r--r--  fs/nilfs2/direct.c | 4
-rw-r--r--  fs/nilfs2/file.c | 15
-rw-r--r--  fs/nilfs2/gcinode.c | 1
-rw-r--r--  fs/nilfs2/inode.c | 84
-rw-r--r--  fs/nilfs2/ioctl.c | 115
-rw-r--r--  fs/nilfs2/mdt.c | 9
-rw-r--r--  fs/nilfs2/mdt.h | 2
-rw-r--r--  fs/nilfs2/namei.c | 2
-rw-r--r--  fs/nilfs2/nilfs.h | 47
-rw-r--r--  fs/nilfs2/page.c | 5
-rw-r--r--  fs/nilfs2/page.h | 3
-rw-r--r--  fs/nilfs2/recovery.c | 32
-rw-r--r--  fs/nilfs2/sb.h | 85
-rw-r--r--  fs/nilfs2/segbuf.c | 2
-rw-r--r--  fs/nilfs2/segment.c | 258
-rw-r--r--  fs/nilfs2/segment.h | 14
-rw-r--r--  fs/nilfs2/super.c | 214
-rw-r--r--  fs/nilfs2/the_nilfs.c | 44
-rw-r--r--  fs/nilfs2/the_nilfs.h | 51
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 2
-rw-r--r--  fs/notify/inode_mark.c | 42
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 3
-rw-r--r--  fs/notify/inotify/inotify_user.c | 39
-rw-r--r--  fs/notify/mark.c | 3
-rw-r--r--  fs/notify/vfsmount_mark.c | 1
-rw-r--r--  fs/ntfs/Makefile | 19
-rw-r--r--  fs/ntfs/aops.c | 4
-rw-r--r--  fs/ntfs/attrib.c | 4
-rw-r--r--  fs/ntfs/compress.c | 5
-rw-r--r--  fs/ntfs/inode.c | 8
-rw-r--r--  fs/ntfs/layout.h | 12
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/logfile.h | 2
-rw-r--r--  fs/ntfs/mft.c | 8
-rw-r--r--  fs/ntfs/runlist.c | 2
-rw-r--r--  fs/ntfs/super.c | 14
-rw-r--r--  fs/ocfs2/Makefile | 4
-rw-r--r--  fs/ocfs2/acl.c | 3
-rw-r--r--  fs/ocfs2/alloc.c | 216
-rw-r--r--  fs/ocfs2/aops.c | 83
-rw-r--r--  fs/ocfs2/aops.h | 2
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 49
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 9
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 20
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 105
-rw-r--r--  fs/ocfs2/cluster/quorum.c | 4
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 12
-rw-r--r--  fs/ocfs2/dcache.c | 45
-rw-r--r--  fs/ocfs2/dir.c | 121
-rw-r--r--  fs/ocfs2/dlm/Makefile | 2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 36
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 4
-rw-r--r--  fs/ocfs2/dlmfs/Makefile | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 246
-rw-r--r--  fs/ocfs2/export.c | 47
-rw-r--r--  fs/ocfs2/extent_map.c | 10
-rw-r--r--  fs/ocfs2/file.c | 220
-rw-r--r--  fs/ocfs2/heartbeat.c | 4
-rw-r--r--  fs/ocfs2/inode.c | 138
-rw-r--r--  fs/ocfs2/ioctl.c | 43
-rw-r--r--  fs/ocfs2/journal.c | 170
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/localalloc.c | 109
-rw-r--r--  fs/ocfs2/locks.c | 1
-rw-r--r--  fs/ocfs2/mmap.c | 7
-rw-r--r--  fs/ocfs2/namei.c | 177
-rw-r--r--  fs/ocfs2/ocfs2.h | 33
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_trace.h | 2739
-rw-r--r--  fs/ocfs2/quota_global.c | 47
-rw-r--r--  fs/ocfs2/quota_local.c | 16
-rw-r--r--  fs/ocfs2/refcounttree.c | 158
-rw-r--r--  fs/ocfs2/reservations.c | 57
-rw-r--r--  fs/ocfs2/reservations.h | 2
-rw-r--r--  fs/ocfs2/resize.c | 23
-rw-r--r--  fs/ocfs2/slot_map.c | 16
-rw-r--r--  fs/ocfs2/stackglue.h | 2
-rw-r--r--  fs/ocfs2/suballoc.c | 193
-rw-r--r--  fs/ocfs2/super.c | 91
-rw-r--r--  fs/ocfs2/symlink.c | 14
-rw-r--r--  fs/ocfs2/sysfile.c | 1
-rw-r--r--  fs/ocfs2/uptodate.c | 73
-rw-r--r--  fs/ocfs2/xattr.c | 159
-rw-r--r--  fs/omfs/file.c | 1
-rw-r--r--  fs/open.c | 13
-rw-r--r--  fs/partitions/check.c | 7
-rw-r--r--  fs/partitions/ldm.c | 16
-rw-r--r--  fs/proc/array.c | 4
-rw-r--r--  fs/proc/base.c | 189
-rw-r--r--  fs/proc/generic.c | 8
-rw-r--r--  fs/proc/inode.c | 2
-rw-r--r--  fs/proc/internal.h | 1
-rw-r--r--  fs/proc/root.c | 32
-rw-r--r--  fs/proc/task_mmu.c | 138
-rw-r--r--  fs/proc/task_nommu.c | 6
-rw-r--r--  fs/pstore/Kconfig | 2
-rw-r--r--  fs/pstore/inode.c | 76
-rw-r--r--  fs/pstore/internal.h | 3
-rw-r--r--  fs/pstore/platform.c | 31
-rw-r--r--  fs/qnx4/inode.c | 1
-rw-r--r--  fs/quota/dquot.c | 56
-rw-r--r--  fs/ramfs/file-nommu.c | 1
-rw-r--r--  fs/reiserfs/Makefile | 4
-rw-r--r--  fs/reiserfs/inode.c | 1
-rw-r--r--  fs/reiserfs/ioctl.c | 4
-rw-r--r--  fs/reiserfs/journal.c | 4
-rw-r--r--  fs/reiserfs/lock.c | 2
-rw-r--r--  fs/reiserfs/super.c | 4
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/reiserfs/xattr_acl.c | 2
-rw-r--r--  fs/select.c | 3
-rw-r--r--  fs/squashfs/Kconfig | 12
-rw-r--r--  fs/squashfs/cache.c | 4
-rw-r--r--  fs/squashfs/decompressor.c | 34
-rw-r--r--  fs/squashfs/decompressor.h | 7
-rw-r--r--  fs/squashfs/dir.c | 9
-rw-r--r--  fs/squashfs/lzo_wrapper.c | 4
-rw-r--r--  fs/squashfs/namei.c | 12
-rw-r--r--  fs/squashfs/squashfs.h | 1
-rw-r--r--  fs/squashfs/squashfs_fs.h | 4
-rw-r--r--  fs/squashfs/super.c | 15
-rw-r--r--  fs/squashfs/xz_wrapper.c | 53
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 10
-rw-r--r--  fs/super.c | 2
-rw-r--r--  fs/sync.c | 28
-rw-r--r--  fs/sysv/itree.c | 1
-rw-r--r--  fs/ubifs/Kconfig | 11
-rw-r--r--  fs/ubifs/budget.c | 2
-rw-r--r--  fs/ubifs/commit.c | 2
-rw-r--r--  fs/ubifs/debug.c | 65
-rw-r--r--  fs/ubifs/debug.h | 152
-rw-r--r--  fs/ubifs/file.c | 14
-rw-r--r--  fs/ubifs/ioctl.c | 2
-rw-r--r--  fs/ubifs/lprops.c | 2
-rw-r--r--  fs/ubifs/lpt.c | 7
-rw-r--r--  fs/ubifs/lpt_commit.c | 4
-rw-r--r--  fs/ubifs/orphan.c | 2
-rw-r--r--  fs/ubifs/recovery.c | 26
-rw-r--r--  fs/ubifs/super.c | 33
-rw-r--r--  fs/ubifs/xattr.c | 4
-rw-r--r--  fs/udf/balloc.c | 9
-rw-r--r--  fs/udf/file.c | 1
-rw-r--r--  fs/udf/inode.c | 1
-rw-r--r--  fs/ufs/inode.c | 3
-rw-r--r--  fs/ufs/super.c | 6
-rw-r--r--  fs/ufs/truncate.c | 1
-rw-r--r--  fs/ufs/util.h | 2
-rw-r--r--  fs/utimes.c | 2
-rw-r--r--  fs/xattr.c | 4
-rw-r--r--  fs/xfs/Makefile | 12
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 392
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 40
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 23
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.c | 126
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.h | 40
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 293
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 265
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c | 2
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 50
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 5
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 56
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 5
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 5
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 91
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 5
-rw-r--r--  fs/xfs/support/debug.c | 107
-rw-r--r--  fs/xfs/support/debug.h | 61
-rw-r--r--  fs/xfs/xfs_alloc.c | 188
-rw-r--r--  fs/xfs/xfs_bmap.c | 24
-rw-r--r--  fs/xfs/xfs_buf_item.c | 17
-rw-r--r--  fs/xfs/xfs_da_btree.c | 9
-rw-r--r--  fs/xfs/xfs_dfrag.c | 4
-rw-r--r--  fs/xfs/xfs_dir2.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 25
-rw-r--r--  fs/xfs/xfs_error.c | 22
-rw-r--r--  fs/xfs/xfs_error.h | 19
-rw-r--r--  fs/xfs/xfs_fsops.c | 6
-rw-r--r--  fs/xfs/xfs_ialloc.c | 82
-rw-r--r--  fs/xfs/xfs_inode.c | 133
-rw-r--r--  fs/xfs/xfs_inode.h | 27
-rw-r--r--  fs/xfs/xfs_inode_item.c | 73
-rw-r--r--  fs/xfs/xfs_iomap.c | 12
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  fs/xfs/xfs_log.c | 162
-rw-r--r--  fs/xfs/xfs_log_priv.h | 7
-rw-r--r--  fs/xfs/xfs_log_recover.c | 227
-rw-r--r--  fs/xfs/xfs_mount.c | 148
-rw-r--r--  fs/xfs/xfs_mount.h | 9
-rw-r--r--  fs/xfs/xfs_quota.h | 3
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 92
-rw-r--r--  fs/xfs/xfs_rtalloc.h | 2
-rw-r--r--  fs/xfs/xfs_rw.c | 58
-rw-r--r--  fs/xfs/xfs_trans.h | 2
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 423
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 9
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 24
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 22
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 81
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 1
490 files changed, 13578 insertions, 8324 deletions
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 5154552..535ab6e 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -262,7 +262,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
if (strcmp(name, "") != 0)
return -EINVAL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
/*
* We allow set/get/list of acl when access=client is not specified
*/
@@ -312,7 +312,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
if (strcmp(name, "") != 0)
return -EINVAL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
/*
* set the attribute on the remote. Without even looking at the
* xattr value. We leave it to the server to validate
@@ -323,7 +323,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
/* update the cached acl value */
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index cd63e00..85b67ff 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -134,7 +134,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *old_fid = NULL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
@@ -237,7 +237,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
int any, access;
struct v9fs_session_info *v9ses;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
switch (access) {
case V9FS_ACCESS_SINGLE:
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index bd8496d..e5ebedf 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,7 +116,6 @@ struct v9fs_session_info {
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
struct rw_semaphore rename_sem;
- struct p9_fid *root_fid; /* Used for file system sync */
};
/* cache_validity flags */
@@ -130,6 +129,7 @@ struct v9fs_inode {
#endif
unsigned int cache_validity;
struct p9_fid *writeback_fid;
+ struct mutex v_mutex;
struct inode vfs_inode;
};
@@ -173,6 +173,11 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
return (inode->i_sb->s_fs_info);
}
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+{
+ return dentry->d_sb->s_fs_info;
+}
+
static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses)
{
return v9ses->flags & V9FS_PROTO_2000U;
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index b6a3b9f..e022890 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
- if (retval <= 0)
+ if (retval == -ENOENT)
+ return 0;
+ if (retval < 0)
return retval;
}
out_valid:
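
The revalidate fix above separates two outcomes that the old "retval <= 0" test conflated: -ENOENT means the object vanished on the server, so the dentry is stale and the hook should return 0 to make the VFS invalidate it, while any other negative value is a real error to propagate. A minimal stand-alone sketch of that return convention (hypothetical helper names; plain C, not the kernel code):

#include <errno.h>
#include <stdio.h>

/* stand-in for v9fs_refresh_inode(): returns 0 on success, -errno on failure */
static int refresh_inode(int simulated_errno)
{
        return simulated_errno ? -simulated_errno : 0;
}

static int example_revalidate(int simulated_errno)
{
        int retval = refresh_inode(simulated_errno);

        if (retval == -ENOENT)  /* object gone on the server: stale dentry */
                return 0;       /* tell the VFS to invalidate, not to fail */
        if (retval < 0)         /* any other error is a hard failure */
                return retval;
        return 1;               /* dentry still valid */
}

int main(void)
{
        printf("%d %d %d\n",
               example_revalidate(0),           /* 1: valid */
               example_revalidate(ENOENT),      /* 0: stale */
               example_revalidate(EIO));        /* -5: hard error */
        return 0;
}
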
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 78bcb97..ffed558 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -90,7 +90,9 @@ int v9fs_file_open(struct inode *inode, struct file *file)
}
file->private_data = fid;
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -101,10 +103,12 @@ int v9fs_file_open(struct inode *inode, struct file *file)
fid = v9fs_writeback_fid(file->f_path.dentry);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
+ mutex_unlock(&v9inode->v_mutex);
goto out_error;
}
v9inode->writeback_fid = (void *) fid;
}
+ mutex_unlock(&v9inode->v_mutex);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, file);
@@ -504,9 +508,12 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (!count)
goto out;
- return v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+ retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
filp->private_data,
- data, count, offset, 1);
+ data, count, &origin, 1);
+ /* update offset on successful write */
+ if (retval > 0)
+ *offset = origin;
out:
return retval;
}
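
The v_mutex hunks above are a double-checked lazy initialisation: the writeback fid is cloned at most once per inode, only for writable opens, and re-checking the pointer under the mutex stops two racing opens from both installing a fid and leaking one. A stand-alone sketch of the pattern using pthreads (hypothetical names; the kernel uses struct mutex, not pthread_mutex_t):

#include <pthread.h>
#include <stdlib.h>

struct wb_state {
        pthread_mutex_t v_mutex;        /* like v9fs_inode.v_mutex */
        void *writeback_fid;            /* lazily created, at most once */
};

/* stand-in for v9fs_writeback_fid(); may fail and return NULL */
static void *clone_writeback_fid(void)
{
        return malloc(1);
}

/* Take the mutex, re-check the pointer, and only create the fid for
 * writers ((f_flags & O_ACCMODE) != O_RDONLY in the hunk above). */
static int open_for_write(struct wb_state *s, int writable)
{
        int err = 0;

        pthread_mutex_lock(&s->v_mutex);
        if (writable && !s->writeback_fid) {
                void *fid = clone_writeback_fid();
                if (!fid)
                        err = -1;
                else
                        s->writeback_fid = fid;
        }
        pthread_mutex_unlock(&s->v_mutex);
        return err;
}
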
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8a2c232..7f6c677 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -221,6 +221,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
#endif
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
+ mutex_init(&v9inode->v_mutex);
return &v9inode->vfs_inode;
}
@@ -650,7 +651,9 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
v9inode = V9FS_I(dentry->d_inode);
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -661,10 +664,12 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
+ mutex_unlock(&v9inode->v_mutex);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
+ mutex_unlock(&v9inode->v_mutex);
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
@@ -931,7 +936,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
@@ -967,8 +972,12 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct p9_wstat wstat;
P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ retval = inode_change_ok(dentry->d_inode, iattr);
+ if (retval)
+ return retval;
+
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if(IS_ERR(fid))
return PTR_ERR(fid);
@@ -993,12 +1002,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (iattr->ia_valid & ATTR_GID)
wstat.n_gid = iattr->ia_gid;
}
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode)) {
- retval = vmtruncate(dentry->d_inode, iattr->ia_size);
- if (retval)
- return retval;
- }
+
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -1006,6 +1010,11 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
retval = p9_client_wstat(fid, &wstat);
if (retval < 0)
return retval;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode))
+ truncate_setsize(dentry->d_inode, iattr->ia_size);
+
v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
@@ -1130,7 +1139,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
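
Note the reordering in the setattr hunks: the request is validated first (inode_change_ok), the change is committed on the server (p9_client_wstat), and only then is the local size updated (truncate_setsize). The old code truncated via vmtruncate before talking to the server, so a failed wstat could leave the client and server sizes disagreeing. A sketch of the ordering, with hypothetical helpers rather than the kernel functions:

/* 1. validate locally      (inode_change_ok, done by the caller)
 * 2. commit on the server  (server_commit stands in for p9_client_wstat)
 * 3. update local state    (mirrors truncate_setsize)
 * If step 2 fails, local and remote sizes still agree. */
static int setattr_ordered(long long *local_size, long long new_size,
                           int (*server_commit)(long long))
{
        int err = server_commit(new_size);     /* step 2 */
        if (err < 0)
                return err;                    /* local size untouched */
        *local_size = new_size;                /* step 3 */
        return 0;
}
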
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 67c138e..82a7c38 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -245,7 +245,9 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
v9fs_set_create_acl(dentry, dacl, pacl);
v9inode = V9FS_I(inode);
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -256,10 +258,12 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
+ mutex_unlock(&v9inode->v_mutex);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
+ mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
@@ -391,7 +395,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
@@ -448,17 +452,11 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode)) {
- retval = vmtruncate(dentry->d_inode, iattr->ia_size);
- if (retval)
- return retval;
- }
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -466,8 +464,12 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
- v9fs_invalidate_inode_attr(dentry->d_inode);
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode))
+ truncate_setsize(dentry->d_inode, iattr->ia_size);
+
+ v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
if (iattr->ia_valid & ATTR_MODE) {
@@ -809,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid)) {
__putname(link);
- link = ERR_PTR(PTR_ERR(fid));
+ link = ERR_CAST(fid);
goto ndset;
}
retval = p9_client_readlink(fid, &target);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 09fd08d..feef6cd 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(inode);
goto release_sb;
}
+
root = d_alloc_root(inode);
if (!root) {
iput(inode);
@@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
}
- v9fs_fid_add(root, fid);
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
- /*
- * Add the root fid to session info. This is used
- * for file system sync. We want a cloned fid here
- * so that we can do a sync_filesystem after a
- * shrink_dcache_for_umount
- */
- v9ses->root_fid = v9fs_fid_clone(root);
- if (IS_ERR(v9ses->root_fid)) {
- retval = PTR_ERR(v9ses->root_fid);
- goto release_sb;
- }
+ v9fs_fid_add(root, fid);
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
@@ -210,11 +200,15 @@ close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return ERR_PTR(retval);
+
release_sb:
/*
- * we will do the session_close and root dentry
- * release in the below call.
+ * we will do the session_close and root dentry release
+ * in the below call. But we need to clunk fid, because we haven't
+ * attached the fid to dentry so it won't get clunked
+ * automatically.
*/
+ p9_client_clunk(fid);
deactivate_locked_super(sb);
return ERR_PTR(retval);
}
@@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
kill_anon_super(s);
- p9_client_clunk(v9ses->root_fid);
+
v9fs_session_cancel(v9ses);
v9fs_session_close(v9ses);
kfree(v9ses);
@@ -262,7 +256,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
goto done;
}
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9fs_proto_dotl(v9ses)) {
res = p9_client_statfs(fid, &rs);
if (res == 0) {
@@ -285,14 +279,6 @@ done:
return res;
}
-static int v9fs_sync_fs(struct super_block *sb, int wait)
-{
- struct v9fs_session_info *v9ses = sb->s_fs_info;
-
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
- return p9_client_sync_fs(v9ses->root_fid);
-}
-
static int v9fs_drop_inode(struct inode *inode)
{
struct v9fs_session_info *v9ses;
@@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode)
return 1;
}
+static int v9fs_write_inode(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct p9_wstat wstat;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
+ */
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ v9inode = V9FS_I(inode);
+ if (!v9inode->writeback_fid)
+ return 0;
+ v9fs_blank_wstat(&wstat);
+
+ ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
+ if (ret < 0) {
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+ }
+ return 0;
+}
+
+static int v9fs_write_inode_dotl(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
+ */
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ v9inode = V9FS_I(inode);
+ if (!v9inode->writeback_fid)
+ return 0;
+ ret = p9_client_fsync(v9inode->writeback_fid, 0);
+ if (ret < 0) {
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+ }
+ return 0;
+}
+
static const struct super_operations v9fs_super_ops = {
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
@@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = {
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
+ .write_inode = v9fs_write_inode,
};
static const struct super_operations v9fs_super_ops_dotl = {
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
- .sync_fs = v9fs_sync_fs,
.statfs = v9fs_statfs,
.drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
+ .write_inode = v9fs_write_inode_dotl,
};
struct file_system_type v9fs_fs_type = {
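
The super-block hunks drop the filesystem-wide sync_fs (and the cloned root_fid it needed) in favour of per-inode write_inode hooks that flush through each inode's own writeback fid; on failure they re-mark the inode dirty so writeback retries later. A toy version of that retry-on-failure idiom (hypothetical types, not the VFS interface):

enum { CLEAN = 0, DIRTY_DATASYNC = 1 };

struct obj {
        int dirty;
        int (*flush)(struct obj *);     /* an fsync-like RPC, e.g. p9_client_fsync */
};

static int write_inode_like(struct obj *o)
{
        int ret = o->flush(o);
        if (ret < 0) {
                o->dirty = DIRTY_DATASYNC;      /* __mark_inode_dirty analogue */
                return ret;                     /* writeback will try again */
        }
        o->dirty = CLEAN;
        return 0;
}
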
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 2ff622f..718ac1f 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -50,6 +50,7 @@ struct adfs_sb_info {
gid_t s_gid; /* owner gid */
umode_t s_owner_mask; /* ADFS owner perm -> unix perm */
umode_t s_other_mask; /* ADFS other perm -> unix perm */
+ int s_ftsuffix; /* ,xyz hex filetype suffix option */
__u32 s_ids_per_zone; /* max. no ids in one zone */
__u32 s_idlen; /* length of ID in map */
@@ -79,6 +80,10 @@ struct adfs_dir {
int nr_buffers;
struct buffer_head *bh[4];
+
+ /* big directories need allocated buffers */
+ struct buffer_head **bh_fplus;
+
unsigned int pos;
unsigned int parent_id;
@@ -89,7 +94,7 @@ struct adfs_dir {
/*
* This is the overall maximum name length
*/
-#define ADFS_MAX_NAME_LEN 256
+#define ADFS_MAX_NAME_LEN (256 + 4) /* +4 for ,xyz hex filetype suffix */
struct object_info {
__u32 parent_id; /* parent object id */
__u32 file_id; /* object id */
@@ -97,10 +102,26 @@ struct object_info {
__u32 execaddr; /* execution address */
__u32 size; /* size */
__u8 attr; /* RISC OS attributes */
- unsigned char name_len; /* name length */
+ unsigned int name_len; /* name length */
char name[ADFS_MAX_NAME_LEN];/* file name */
+
+ /* RISC OS file type (12-bit: derived from loadaddr) */
+ __u16 filetype;
};
+/* RISC OS 12-bit filetype converts to ,xyz hex filename suffix */
+static inline int append_filetype_suffix(char *buf, __u16 filetype)
+{
+ if (filetype == 0xffff) /* no explicit 12-bit file type was set */
+ return 0;
+
+ *buf++ = ',';
+ *buf++ = hex_asc_lo(filetype >> 8);
+ *buf++ = hex_asc_lo(filetype >> 4);
+ *buf++ = hex_asc_lo(filetype >> 0);
+ return 4;
+}
+
struct adfs_dir_ops {
int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
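
append_filetype_suffix() above encodes the 12-bit RISC OS filetype as a lowercase-hex ",xyz" suffix and returns how many bytes it appended; 0xffff means no filetype was recorded. For example, filetype 0xff9 appended to "logo" yields "logo,ff9". A user-space rendition (hex_lo() is a stand-in for the kernel's hex_asc_lo()):

#include <stdio.h>
#include <stdint.h>

static char hex_lo(unsigned v)          /* lowercase hex digit, like hex_asc_lo() */
{
        return "0123456789abcdef"[v & 0xf];
}

static int append_filetype_suffix(char *buf, uint16_t filetype)
{
        if (filetype == 0xffff)         /* no 12-bit filetype recorded */
                return 0;
        *buf++ = ',';
        *buf++ = hex_lo(filetype >> 8);
        *buf++ = hex_lo(filetype >> 4);
        *buf++ = hex_lo(filetype >> 0);
        return 4;                       /* bytes appended; name is not NUL-terminated */
}

int main(void)
{
        char name[16] = "logo";
        int len = 4;

        len += append_filetype_suffix(name + len, 0xff9);
        printf("%.*s\n", len, name);    /* prints: logo,ff9 */
        return 0;
}
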
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index bafc712..4bbe853 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -52,7 +52,6 @@ static inline int adfs_readname(char *buf, char *ptr, int maxlen)
*buf++ = *ptr;
ptr++;
}
- *buf = '\0';
return buf - old_buf;
}
@@ -208,7 +207,8 @@ release_buffers:
* convert a disk-based directory entry to a Linux ADFS directory entry
*/
static inline void
-adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
+adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
+ struct adfs_direntry *de)
{
obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
obj->file_id = adfs_readval(de->dirinddiscadd, 3);
@@ -216,6 +216,23 @@ adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
obj->execaddr = adfs_readval(de->direxec, 4);
obj->size = adfs_readval(de->dirlen, 4);
obj->attr = de->newdiratts;
+ obj->filetype = -1;
+
+ /*
+ * object is a file and is filetyped and timestamped?
+ * RISC OS 12-bit filetype is stored in load_address[19:8]
+ */
+ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
+ (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
+ obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
+
+ /* optionally append the ,xyz hex filetype suffix */
+ if (ADFS_SB(dir->sb)->s_ftsuffix)
+ obj->name_len +=
+ append_filetype_suffix(
+ &obj->name[obj->name_len],
+ obj->filetype);
+ }
}
/*
@@ -260,7 +277,7 @@ __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
if (!de.dirobname[0])
return -ENOENT;
- adfs_dir2obj(obj, &de);
+ adfs_dir2obj(dir, obj, &de);
return 0;
}
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 1796bb35..d9e3bee 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/buffer_head.h>
+#include <linux/slab.h>
#include "adfs.h"
#include "dir_fplus.h"
@@ -22,30 +23,53 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
dir->nr_buffers = 0;
+ /* start off using fixed bh set - only alloc for big dirs */
+ dir->bh_fplus = &dir->bh[0];
+
block = __adfs_block_map(sb, id, 0);
if (!block) {
adfs_error(sb, "dir object %X has a hole at offset 0", id);
goto out;
}
- dir->bh[0] = sb_bread(sb, block);
- if (!dir->bh[0])
+ dir->bh_fplus[0] = sb_bread(sb, block);
+ if (!dir->bh_fplus[0])
goto out;
dir->nr_buffers += 1;
- h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
size = le32_to_cpu(h->bigdirsize);
if (size != sz) {
- printk(KERN_WARNING "adfs: adfs_fplus_read: directory header size\n"
- " does not match directory size\n");
+ printk(KERN_WARNING "adfs: adfs_fplus_read:"
+ " directory header size %X\n"
+ " does not match directory size %X\n",
+ size, sz);
}
if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
h->bigdirversion[2] != 0 || size & 2047 ||
- h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME))
+ h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
+ printk(KERN_WARNING "adfs: dir object %X has"
+ " malformed dir header\n", id);
goto out;
+ }
size >>= sb->s_blocksize_bits;
+ if (size > sizeof(dir->bh)/sizeof(dir->bh[0])) {
+ /* this directory is too big for fixed bh set, must allocate */
+ struct buffer_head **bh_fplus =
+ kzalloc(size * sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!bh_fplus) {
+ adfs_error(sb, "not enough memory for"
+ " dir object %X (%d blocks)", id, size);
+ goto out;
+ }
+ dir->bh_fplus = bh_fplus;
+ /* copy over the pointer to the block that we've already read */
+ dir->bh_fplus[0] = dir->bh[0];
+ }
+
for (blk = 1; blk < size; blk++) {
block = __adfs_block_map(sb, id, blk);
if (!block) {
@@ -53,25 +77,44 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
goto out;
}
- dir->bh[blk] = sb_bread(sb, block);
- if (!dir->bh[blk])
+ dir->bh_fplus[blk] = sb_bread(sb, block);
+ if (!dir->bh_fplus[blk]) {
+ adfs_error(sb, "dir object %X failed read for"
+ " offset %d, mapped block %X",
+ id, blk, block);
goto out;
- dir->nr_buffers = blk;
+ }
+
+ dir->nr_buffers += 1;
}
- t = (struct adfs_bigdirtail *)(dir->bh[size - 1]->b_data + (sb->s_blocksize - 8));
+ t = (struct adfs_bigdirtail *)
+ (dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
t->bigdirendmasseq != h->startmasseq ||
- t->reserved[0] != 0 || t->reserved[1] != 0)
+ t->reserved[0] != 0 || t->reserved[1] != 0) {
+ printk(KERN_WARNING "adfs: dir object %X has "
+ "malformed dir end\n", id);
goto out;
+ }
dir->parent_id = le32_to_cpu(h->bigdirparent);
dir->sb = sb;
return 0;
+
out:
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh[i]);
+ if (dir->bh_fplus) {
+ for (i = 0; i < dir->nr_buffers; i++)
+ brelse(dir->bh_fplus[i]);
+
+ if (&dir->bh[0] != dir->bh_fplus)
+ kfree(dir->bh_fplus);
+
+ dir->bh_fplus = NULL;
+ }
+
+ dir->nr_buffers = 0;
dir->sb = NULL;
return ret;
}
@@ -79,7 +122,8 @@ out:
static int
adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
{
- struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ struct adfs_bigdirheader *h =
+ (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
int ret = -ENOENT;
if (fpos <= le32_to_cpu(h->bigdirentries)) {
@@ -102,21 +146,27 @@ dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
partial = sb->s_blocksize - offset;
if (partial >= len)
- memcpy(to, dir->bh[buffer]->b_data + offset, len);
+ memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
else {
char *c = (char *)to;
remainder = len - partial;
- memcpy(c, dir->bh[buffer]->b_data + offset, partial);
- memcpy(c + partial, dir->bh[buffer + 1]->b_data, remainder);
+ memcpy(c,
+ dir->bh_fplus[buffer]->b_data + offset,
+ partial);
+
+ memcpy(c + partial,
+ dir->bh_fplus[buffer + 1]->b_data,
+ remainder);
}
}
static int
adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
{
- struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ struct adfs_bigdirheader *h =
+ (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
struct adfs_bigdirentry bde;
unsigned int offset;
int i, ret = -ENOENT;
@@ -147,6 +197,24 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
if (obj->name[i] == '/')
obj->name[i] = '.';
+ obj->filetype = -1;
+
+ /*
+ * object is a file and is filetyped and timestamped?
+ * RISC OS 12-bit filetype is stored in load_address[19:8]
+ */
+ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
+ (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
+ obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
+
+ /* optionally append the ,xyz hex filetype suffix */
+ if (ADFS_SB(dir->sb)->s_ftsuffix)
+ obj->name_len +=
+ append_filetype_suffix(
+ &obj->name[obj->name_len],
+ obj->filetype);
+ }
+
dir->pos += 1;
ret = 0;
out:
@@ -160,7 +228,7 @@ adfs_fplus_sync(struct adfs_dir *dir)
int i;
for (i = dir->nr_buffers - 1; i >= 0; i--) {
- struct buffer_head *bh = dir->bh[i];
+ struct buffer_head *bh = dir->bh_fplus[i];
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
err = -EIO;
@@ -174,8 +242,17 @@ adfs_fplus_free(struct adfs_dir *dir)
{
int i;
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh[i]);
+ if (dir->bh_fplus) {
+ for (i = 0; i < dir->nr_buffers; i++)
+ brelse(dir->bh_fplus[i]);
+
+ if (&dir->bh[0] != dir->bh_fplus)
+ kfree(dir->bh_fplus);
+
+ dir->bh_fplus = NULL;
+ }
+
+ dir->nr_buffers = 0;
dir->sb = NULL;
}
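
The dir_fplus rework is a classic small-buffer optimisation: bh_fplus normally points at the fixed four-entry bh[] array inside struct adfs_dir and is switched to a kzalloc'd array only for directories spanning more blocks, so the free path must check which storage it holds before calling kfree. A generic sketch of the pattern (hypothetical names):

#include <stdlib.h>

#define INLINE_SLOTS 4

struct dirbuf {
        void *inline_bh[INLINE_SLOTS];  /* like struct adfs_dir.bh[4] */
        void **bh;                      /* like bh_fplus: inline or heap */
        unsigned nr;
};

static int dirbuf_init(struct dirbuf *d, unsigned needed)
{
        d->bh = d->inline_bh;           /* start with the fixed set */
        d->nr = 0;
        if (needed > INLINE_SLOTS) {    /* big directory: allocate */
                d->bh = calloc(needed, sizeof(*d->bh));
                if (!d->bh) {
                        d->bh = d->inline_bh;
                        return -1;
                }
        }
        return 0;
}

static void dirbuf_free(struct dirbuf *d)
{
        /* release each slot here, then the array itself, but only
         * if it was heap-allocated (mirrors the kfree guard above) */
        if (d->bh != d->inline_bh)
                free(d->bh);
        d->bh = NULL;
        d->nr = 0;
}
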
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 09fe401..d5250c5 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -72,32 +72,18 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations adfs_aops = {
.readpage = adfs_readpage,
.writepage = adfs_writepage,
- .sync_page = block_sync_page,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
.bmap = _adfs_bmap
};
-static inline unsigned int
-adfs_filetype(struct inode *inode)
-{
- unsigned int type;
-
- if (ADFS_I(inode)->stamped)
- type = (ADFS_I(inode)->loadaddr >> 8) & 0xfff;
- else
- type = (unsigned int) -1;
-
- return type;
-}
-
/*
* Convert ADFS attributes and filetype to Linux permission.
*/
static umode_t
adfs_atts2mode(struct super_block *sb, struct inode *inode)
{
- unsigned int filetype, attr = ADFS_I(inode)->attr;
+ unsigned int attr = ADFS_I(inode)->attr;
umode_t mode, rmask;
struct adfs_sb_info *asb = ADFS_SB(sb);
@@ -106,9 +92,7 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
return S_IFDIR | S_IXUGO | mode;
}
- filetype = adfs_filetype(inode);
-
- switch (filetype) {
+ switch (ADFS_I(inode)->filetype) {
case 0xfc0: /* LinkFS */
return S_IFLNK|S_IRWXUGO;
@@ -174,50 +158,48 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
/*
* Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time
- * referenced to 1 Jan 1900 (til 2248)
+ * referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
+ * of time to convert from RISC OS epoch to Unix epoch.
*/
static void
adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
{
unsigned int high, low;
+ /* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
+ * 01 Jan 1900 00:00:00 (RISC OS epoch)
+ */
+ static const s64 nsec_unix_epoch_diff_risc_os_epoch =
+ 2208988800000000000LL;
+ s64 nsec;
if (ADFS_I(inode)->stamped == 0)
goto cur_time;
- high = ADFS_I(inode)->loadaddr << 24;
- low = ADFS_I(inode)->execaddr;
+ high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */
+ low = ADFS_I(inode)->execaddr; /* bottom 32 bits of timestamp */
- high |= low >> 8;
- low &= 255;
+ /* convert 40-bit centi-seconds to 32-bit seconds
+ * going via nanoseconds to retain precision
+ */
+ nsec = (((s64) high << 32) | (s64) low) * 10000000; /* cs to ns */
/* Files dated pre 01 Jan 1970 00:00:00. */
- if (high < 0x336e996a)
+ if (nsec < nsec_unix_epoch_diff_risc_os_epoch)
goto too_early;
- /* Files dated post 18 Jan 2038 03:14:05. */
- if (high >= 0x656e9969)
- goto too_late;
+ /* convert from RISC OS to Unix epoch */
+ nsec -= nsec_unix_epoch_diff_risc_os_epoch;
- /* discard 2208988800 (0x336e996a00) seconds of time */
- high -= 0x336e996a;
-
- /* convert 40-bit centi-seconds to 32-bit seconds */
- tv->tv_sec = (((high % 100) << 8) + low) / 100 + (high / 100 << 8);
- tv->tv_nsec = 0;
+ *tv = ns_to_timespec(nsec);
return;
cur_time:
- *tv = CURRENT_TIME_SEC;
+ *tv = CURRENT_TIME;
return;
too_early:
tv->tv_sec = tv->tv_nsec = 0;
return;
-
- too_late:
- tv->tv_sec = 0x7ffffffd;
- tv->tv_nsec = 0;
- return;
}
/*
@@ -279,7 +261,8 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->loadaddr = obj->loadaddr;
ADFS_I(inode)->execaddr = obj->execaddr;
ADFS_I(inode)->attr = obj->attr;
- ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
+ ADFS_I(inode)->filetype = obj->filetype;
+ ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
inode->i_mode = adfs_atts2mode(sb, inode);
adfs_adfs2unix_time(&inode->i_mtime, inode);
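
The new time conversion widens the 40-bit centi-second RISC OS stamp (epoch 1 Jan 1900) to signed 64-bit nanoseconds before subtracting the 2,208,988,800-second gap to the Unix epoch, which is why the old 18 Jan 2038 cutoff could be dropped: nothing overflows 32 bits any more. A worked stand-alone example (the constant matches the one in the hunk; 1 cs = 10^7 ns):

#include <stdio.h>
#include <stdint.h>

/* ns between 1900-01-01 (RISC OS epoch) and 1970-01-01 (Unix epoch) */
#define EPOCH_DIFF_NS 2208988800000000000LL

/* 40-bit centi-second stamp: top 8 bits in 'high', low 32 bits in 'low' */
static int64_t riscos_to_unix_ns(uint8_t high, uint32_t low)
{
        int64_t cs = ((int64_t)high << 32) | low;
        return cs * 10000000LL - EPOCH_DIFF_NS;  /* cs -> ns, shift epoch */
}

int main(void)
{
        /* 0x336e996a00 cs == 2208988800 s: exactly the Unix epoch */
        int64_t ns = riscos_to_unix_ns(0x33, 0x6e996a00);
        printf("%lld\n", (long long)ns);         /* prints 0 */
        return 0;
}
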
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index d1a5932..6935f052 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -51,7 +51,7 @@ static DEFINE_RWLOCK(adfs_map_lock);
/*
* This is fun. We need to load up to 19 bits from the map at an
- * arbitary bit alignment. (We're limited to 19 bits by F+ version 2).
+ * arbitrary bit alignment. (We're limited to 19 bits by F+ version 2).
*/
#define GET_FRAG_ID(_map,_start,_idmask) \
({ \
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 06d7388..c8bf36a 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -138,17 +138,20 @@ static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
seq_printf(seq, ",othmask=%o", asb->s_other_mask);
+ if (asb->s_ftsuffix != 0)
+ seq_printf(seq, ",ftsuffix=%u", asb->s_ftsuffix);
return 0;
}
-enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err};
+enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix, Opt_err};
static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_ownmask, "ownmask=%o"},
{Opt_othmask, "othmask=%o"},
+ {Opt_ftsuffix, "ftsuffix=%u"},
{Opt_err, NULL}
};
@@ -189,6 +192,11 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
asb->s_other_mask = option;
break;
+ case Opt_ftsuffix:
+ if (match_int(args, &option))
+ return -EINVAL;
+ asb->s_ftsuffix = option;
+ break;
default:
printk("ADFS-fs: unrecognised mount option \"%s\" "
"or missing value\n", p);
@@ -366,6 +374,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_gid = 0;
asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
+ asb->s_ftsuffix = 0;
if (parse_options(sb, data))
goto error;
@@ -445,11 +454,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root);
root_obj.name_len = 0;
- root_obj.loadaddr = 0;
- root_obj.execaddr = 0;
+ /* Set root object date as 01 Jan 1987 00:00:00 */
+ root_obj.loadaddr = 0xfff0003f;
+ root_obj.execaddr = 0xec22c000;
root_obj.size = ADFS_NEWDIR_SIZE;
root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ |
ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;
+ root_obj.filetype = -1;
/*
* If this is a F+ disk with variable length directories,
@@ -463,6 +474,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_dir = &adfs_f_dir_ops;
asb->s_namelen = ADFS_F_NAME_LEN;
}
+ /*
+ * ,xyz hex filetype suffix may be added by driver
+ * to files that have valid RISC OS filetype
+ */
+ if (asb->s_ftsuffix)
+ asb->s_namelen += 4;
sb->s_d_op = &adfs_dentry_operations;
root = adfs_iget(sb, &root_obj);
diff --git a/fs/affs/Makefile b/fs/affs/Makefile
index b2c4f54..3988b4a 100644
--- a/fs/affs/Makefile
+++ b/fs/affs/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux affs filesystem routines.
#
-#EXTRA_CFLAGS=-DDEBUG=1
+#ccflags-y := -DDEBUG=1
obj-$(CONFIG_AFFS_FS) += affs.o
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0a90dcd..acf321b 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
- .sync_page = block_sync_page,
.write_begin = affs_write_begin,
.write_end = generic_write_end,
.bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
const struct address_space_operations affs_aops_ofs = {
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
- //.sync_page = affs_sync_page_ofs,
.write_begin = affs_write_begin_ofs,
.write_end = affs_write_end_ofs
};
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index 0fb315d..577763c3 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -98,7 +98,7 @@ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -117,7 +117,7 @@ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
@@ -150,7 +150,7 @@ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -172,7 +172,7 @@ static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static
enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data,
@@ -283,7 +283,7 @@ static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -309,7 +309,7 @@ static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 0d5eeadf..3c090b7 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -293,7 +293,7 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
if (!cell) {
/* this should not happen unless user tries to mount
* when root cell is not set. Return an impossibly
- * bizzare errno to alert the user. Things like
+ * bizarre errno to alert the user. Things like
* ENOENT might be "more appropriate" but they happen
* for other reasons.
*/
diff --git a/fs/aio.c b/fs/aio.c
index 7f54f43..e29ec48 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -34,8 +34,6 @@
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
#include <linux/compat.h>
#include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-#define AIO_BATCH_HASH_BITS 3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE (1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
- struct hlist_node list;
- struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -86,8 +76,7 @@ static int __init aio_setup(void)
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
- abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
- BUG_ON(!aio_wq || !abe_pool);
+ BUG_ON(!aio_wq);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
@@ -520,7 +509,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
ctx->reqs_active--;
if (unlikely(!ctx->reqs_active && ctx->dead))
- wake_up(&ctx->wait);
+ wake_up_all(&ctx->wait);
}
static void aio_fput_routine(struct work_struct *data)
@@ -1229,7 +1218,7 @@ static void io_destroy(struct kioctx *ioctx)
* by other CPUs at this point. Right now, we rely on the
* locking done by the above calls to ensure this consistency.
*/
- wake_up(&ioctx->wait);
+ wake_up_all(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
@@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
return 0;
}
-static void aio_batch_add(struct address_space *mapping,
- struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos;
- unsigned bucket;
-
- bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
- hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
- if (abe->mapping == mapping)
- return;
- }
-
- abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
- /*
- * we should be using igrab here, but
- * we don't want to hammer on the global
- * inode spinlock just to take an extra
- * reference on a file that we must already
- * have a reference to.
- *
- * When we're called, we always have a reference
- * on the file, so we must always have a reference
- * on the inode, so ihold() is safe here.
- */
- ihold(mapping->host);
- abe->mapping = mapping;
- hlist_add_head(&abe->list, &batch_hash[bucket]);
- return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos, *n;
- int i;
-
- for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
- hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
- blk_run_address_space(abe->mapping);
- iput(abe->mapping->host);
- hlist_del(&abe->list);
- mempool_free(abe, abe_pool);
- }
- }
-}
-
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb, struct hlist_head *batch_hash,
- bool compat)
+ struct iocb *iocb, bool compat)
{
struct kiocb *req;
struct file *file;
@@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
;
}
spin_unlock_irq(&ctx->ctx_lock);
- if (req->ki_opcode == IOCB_CMD_PREAD ||
- req->ki_opcode == IOCB_CMD_PREADV ||
- req->ki_opcode == IOCB_CMD_PWRITE ||
- req->ki_opcode == IOCB_CMD_PWRITEV)
- aio_batch_add(file->f_mapping, batch_hash);
aio_put_req(req); /* drop extra ref to req */
return 0;
@@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
struct kioctx *ctx;
long ret = 0;
int i;
- struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
+ struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
return -EINVAL;
}
+ blk_start_plug(&plug);
+
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
@@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
break;
}
- ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+ ret = io_submit_one(ctx, user_iocb, &tmp, compat);
if (ret)
break;
}
- aio_batch_free(batch_hash);
+ blk_finish_plug(&plug);
put_ioctx(ctx);
return i ? i : ret;
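
The aio change swaps the hand-rolled per-mapping batch hash for block-layer plugging: do_io_submit() now brackets its submission loop with blk_start_plug()/blk_finish_plug(), so queued requests are dispatched once at the end rather than per-iocb. A stand-alone sketch of that bracketed-batching shape (hypothetical API, not the block layer's):

#include <stddef.h>

struct plug {
        size_t queued;
};

static void plug_start(struct plug *p)  { p->queued = 0; }
static void plug_queue(struct plug *p)  { p->queued++; }  /* queue one io */

static size_t plug_finish(struct plug *p)       /* flush the whole batch */
{
        size_t n = p->queued;
        p->queued = 0;
        return n;       /* in the kernel this is where the queue is run */
}

static size_t submit_many(size_t nr)
{
        struct plug plug;
        size_t i;

        plug_start(&plug);
        for (i = 0; i < nr; i++)
                plug_queue(&plug);      /* io_submit_one() analogue */
        return plug_finish(&plug);      /* one flush, not one per io */
}
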
diff --git a/fs/attr.c b/fs/attr.c
index 7ca4181..91dbe2a 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -59,7 +59,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
@@ -69,7 +69,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
}
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(inode_newsize_ok);
* setattr_copy must be called with i_mutex held.
*
* setattr_copy updates the inode's metadata with that specified
- * in attr. Noticably missing is inode size update, which is more complex
+ * in attr. Noticeably missing is inode size update, which is more complex
* as it requires pagecache updates.
*
* The inode is not marked as dirty after this operation. The rationale is
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 54f9237..475f9c5 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -61,8 +61,6 @@ do { \
current->pid, __func__, ##args); \
} while (0)
-extern spinlock_t autofs4_lock;
-
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 1442da4..509fe1e 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -372,6 +372,10 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
return -EBUSY;
} else {
struct file *pipe = fget(pipefd);
+ if (!pipe) {
+ err = -EBADF;
+ goto out;
+ }
if (!pipe->f_op || !pipe->f_op->write) {
err = -EPIPE;
fput(pipe);
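
The dev-ioctl hunk adds a NULL check that was missing: fget() returns NULL for an invalid descriptor, and the old code dereferenced pipe->f_op unconditionally. The shape of the fix as a small runnable example (lookup_fd() is a hypothetical stand-in for fget()):

#include <errno.h>
#include <stdio.h>

struct file_like { int can_write; };

/* stand-in for fget(): NULL when the descriptor is invalid */
static struct file_like *lookup_fd(int fd, struct file_like *table, int n)
{
        return (fd >= 0 && fd < n) ? &table[fd] : NULL;
}

static int set_pipe(int fd, struct file_like *table, int n)
{
        struct file_like *pipe = lookup_fd(fd, table, n);

        if (!pipe)              /* the check the patch adds */
                return -EBADF;
        if (!pipe->can_write)   /* only now is the dereference safe */
                return -EPIPE;
        return 0;
}

int main(void)
{
        struct file_like fds[1] = { { 1 } };

        printf("%d %d\n", set_pipe(0, fds, 1), set_pipe(7, fds, 1));
        return 0;               /* prints: 0 -9 */
}
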
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index f43100b..450f529 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -87,18 +87,70 @@ done:
}
/*
+ * Calculate and dget next entry in the subdirs list under root.
+ */
+static struct dentry *get_next_positive_subdir(struct dentry *prev,
+ struct dentry *root)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct list_head *next;
+ struct dentry *p, *q;
+
+ spin_lock(&sbi->lookup_lock);
+
+ if (prev == NULL) {
+ spin_lock(&root->d_lock);
+ prev = dget_dlock(root);
+ next = prev->d_subdirs.next;
+ p = prev;
+ goto start;
+ }
+
+ p = prev;
+ spin_lock(&p->d_lock);
+again:
+ next = p->d_u.d_child.next;
+start:
+ if (next == &root->d_subdirs) {
+ spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ dput(prev);
+ return NULL;
+ }
+
+ q = list_entry(next, struct dentry, d_u.d_child);
+
+ spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+ /* Negative dentry - try next */
+ if (!simple_positive(q)) {
+ spin_unlock(&p->d_lock);
+ p = q;
+ goto again;
+ }
+ dget_dlock(q);
+ spin_unlock(&q->d_lock);
+ spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ dput(prev);
+
+ return q;
+}
+
+/*
* Calculate and dget next entry in top down tree traversal.
*/
static struct dentry *get_next_positive_dentry(struct dentry *prev,
struct dentry *root)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
struct list_head *next;
struct dentry *p, *ret;
if (prev == NULL)
return dget(root);
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
relock:
p = prev;
spin_lock(&p->d_lock);
@@ -110,7 +162,7 @@ again:
if (p == root) {
spin_unlock(&p->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
}
@@ -140,7 +192,7 @@ again:
dget_dlock(ret);
spin_unlock(&ret->d_lock);
spin_unlock(&p->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
dput(prev);
@@ -290,11 +342,8 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(root);
/* No point expiring a pending mount */
- if (ino->flags & AUTOFS_INF_PENDING) {
- spin_unlock(&sbi->fs_lock);
- return NULL;
- }
- managed_dentry_set_transit(root);
+ if (ino->flags & AUTOFS_INF_PENDING)
+ goto out;
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
struct autofs_info *ino = autofs4_dentry_ino(root);
ino->flags |= AUTOFS_INF_EXPIRING;
@@ -302,7 +351,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
spin_unlock(&sbi->fs_lock);
return root;
}
- managed_dentry_clear_transit(root);
+out:
spin_unlock(&sbi->fs_lock);
dput(root);
@@ -336,13 +385,12 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
timeout = sbi->exp_timeout;
dentry = NULL;
- while ((dentry = get_next_positive_dentry(dentry, root))) {
+ while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
/* No point expiring a pending mount */
if (ino->flags & AUTOFS_INF_PENDING)
- goto cont;
- managed_dentry_set_transit(dentry);
+ goto next;
/*
* Case 1: (i) indirect mount or top level pseudo direct mount
@@ -402,8 +450,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
}
}
next:
- managed_dentry_clear_transit(dentry);
-cont:
spin_unlock(&sbi->fs_lock);
}
return NULL;
@@ -415,13 +461,13 @@ found:
ino->flags |= AUTOFS_INF_EXPIRING;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
spin_lock(&expired->d_parent->d_lock);
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&expired->d_lock);
spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return expired;
}
@@ -484,8 +530,6 @@ int autofs4_expire_run(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
ino->flags &= ~AUTOFS_INF_EXPIRING;
- if (!d_unhashed(dentry))
- managed_dentry_clear_transit(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -513,9 +557,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_EXPIRING;
spin_lock(&dentry->d_lock);
- if (ret)
- __managed_dentry_clear_transit(dentry);
- else {
+ if (!ret) {
if ((IS_ROOT(dentry) ||
(autofs_type_indirect(sbi->type) &&
IS_ROOT(dentry->d_parent))) &&
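Both traversal helpers gate on simple_positive() while holding the child's d_lock. For reference, the helper as carried in the autofs headers of this era (quoted from memory, so treat as a sketch):

	static inline int simple_positive(struct dentry *dentry)
	{
		return dentry->d_inode && !d_unhashed(dentry);
	}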
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index e6f84d2..f55ae23 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -23,8 +23,6 @@
#include "autofs_i.h"
-DEFINE_SPINLOCK(autofs4_lock);
-
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
@@ -125,15 +123,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
* autofs file system so just let the libfs routines handle
* it.
*/
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
out:
return dcache_dir_open(inode, file);
@@ -171,7 +169,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
list_for_each(p, head) {
@@ -204,14 +201,12 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
dget_dlock(active);
spin_unlock(&active->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return active;
}
next:
spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return NULL;
}
@@ -226,7 +221,6 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->expiring_list;
list_for_each(p, head) {
@@ -259,14 +253,12 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
dget_dlock(expiring);
spin_unlock(&expiring->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return expiring;
}
next:
spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return NULL;
}
@@ -275,17 +267,16 @@ static int autofs4_mount_wait(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
- int status;
+ int status = 0;
if (ino->flags & AUTOFS_INF_PENDING) {
DPRINTK("waiting for mount name=%.*s",
dentry->d_name.len, dentry->d_name.name);
status = autofs4_wait(sbi, dentry, NFY_MOUNT);
DPRINTK("mount wait done status=%d", status);
- ino->last_used = jiffies;
- return status;
}
- return 0;
+ ino->last_used = jiffies;
+ return status;
}
static int do_expire_wait(struct dentry *dentry)
@@ -319,9 +310,12 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
+ struct autofs_info *ino;
struct dentry *new = d_lookup(parent, &dentry->d_name);
if (!new)
return NULL;
+ ino = autofs4_dentry_ino(new);
+ ino->last_used = jiffies;
dput(path->dentry);
path->dentry = new;
}
@@ -338,18 +332,6 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
DPRINTK("dentry=%p %.*s",
dentry, dentry->d_name.len, dentry->d_name.name);
- /*
- * Someone may have manually umounted this or it was a submount
- * that has gone away.
- */
- spin_lock(&dentry->d_lock);
- if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
- if (!(dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
- (dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
- __managed_dentry_set_transit(path->dentry);
- }
- spin_unlock(&dentry->d_lock);
-
/* The daemon never triggers a mount. */
if (autofs4_oz_mode(sbi))
return NULL;
@@ -418,18 +400,17 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
done:
if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
/*
- * Any needed mounting has been completed and the path updated
- * so turn this into a normal dentry so we don't continually
- * call ->d_automount() and ->d_manage().
- */
- spin_lock(&dentry->d_lock);
- __managed_dentry_clear_transit(dentry);
- /*
+ * Any needed mounting has been completed and the path
+ * updated so clear DCACHE_NEED_AUTOMOUNT so we don't
+ * call ->d_automount() on rootless multi-mounts since
+ * it can lead to an incorrect ELOOP error return.
+ *
* Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and
* symlinks as in all other cases the dentry will be covered by
* an actual mount so ->d_automount() won't be called during
* the follow.
*/
+ spin_lock(&dentry->d_lock);
if ((!d_mountpoint(dentry) &&
!list_empty(&dentry->d_subdirs)) ||
(dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
@@ -455,6 +436,8 @@ int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
/* The daemon never waits. */
if (autofs4_oz_mode(sbi)) {
+ if (rcu_walk)
+ return 0;
if (!d_mountpoint(dentry))
return -EISDIR;
return 0;
@@ -612,12 +595,12 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
dir->i_mtime = CURRENT_TIME;
- spin_lock(&autofs4_lock);
- autofs4_add_expiring(dentry);
+ spin_lock(&sbi->lookup_lock);
+ __autofs4_add_expiring(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return 0;
}
@@ -629,7 +612,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
* set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
* of the directory tree. There is no need to clear the automount flag
* following a mount or restore it after an expire because these mounts
- * are always covered. However, it is neccessary to ensure that these
+ * are always covered. However, it is necessary to ensure that these
* flags are clear on non-empty directories to avoid unnecessary calls
* during path walks.
*/
@@ -686,20 +669,17 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
if (!autofs4_oz_mode(sbi))
return -EACCES;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return -ENOTEMPTY;
}
__autofs4_add_expiring(dentry);
- spin_unlock(&sbi->lookup_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
if (sbi->version < 5)
autofs_clear_leaf_automount_flags(dentry);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 5601005..2543598 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -197,12 +197,12 @@ rename_retry:
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->fs_lock);
for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
len += tmp->d_name.len + 1;
if (!len || --len > NAME_MAX) {
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->fs_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
@@ -218,7 +218,7 @@ rename_retry:
p -= tmp->d_name.len;
strncpy(p, tmp->d_name.name, tmp->d_name.len);
}
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->fs_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
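The switch from the global autofs4_lock to sbi->fs_lock leaves the rename_lock seqlock retry pattern intact; stripped to its skeleton, the path walk above is:

	unsigned seq;

rename_retry:
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	/* walk tmp = dentry ... root via d_parent, summing name lengths */
	rcu_read_unlock();
	if (read_seqretry(&rename_lock, seq))
		goto rename_retry;	/* a concurrent rename invalidated the walk */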
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
index ce8c787..75a461c 100644
--- a/fs/befs/ChangeLog
+++ b/fs/befs/ChangeLog
@@ -24,7 +24,7 @@ Version 0.9 (2002-03-14)
Version 0.64 (2002-02-07)
==========
-* Did the string comparision really right this time (btree.c) [WD]
+* Did the string comparison really right this time (btree.c) [WD]
* Fixed up some places where I assumed that a long int could hold
a pointer value. (btree.c) [WD]
@@ -114,7 +114,7 @@ Version 0.6 (2001-12-15)
More flexible. Will soon be controllable at mount time
(see TODO). [WD]
-* Rewrote datastream positon lookups.
+* Rewrote datastream position lookups.
(datastream.c) [WD]
* Moved the TODO list to its own file.
@@ -150,7 +150,7 @@ Version 0.50 (2001-11-13)
* Anton also told me that the blocksize is not allowed to be larger than
the page size in linux, which is 4k i386. Oops. Added a test for
(blocksize > PAGE_SIZE), and refuse to mount in that case. What this
- practicaly means is that 8k blocksize volumes won't work without a major
+ practically means is that 8k blocksize volumes won't work without a major
restructuring of the driver (or an alpha or other 64bit hardware). [WD]
* Cleaned up the befs_count_blocks() function. Much smarter now.
@@ -183,7 +183,7 @@ Version 0.45 (2001-10-29)
structures into the generic pointer fields of the public structures
with kmalloc(). put_super and put_inode free them. This allows us not
to have to touch the definitions of the public structures in
- include/linux/fs.h. Also, befs_inode_info is huge (becuase of the
+ include/linux/fs.h. Also, befs_inode_info is huge (because of the
symlink string). (super.c, inode.c, befs_fs.h) [WD]
* Fixed a thinko that was corrupting file reads after the first block_run
@@ -404,7 +404,7 @@ Version 0.4 (2001-10-28)
* Fixed compile errors on 2.4.1 kernel (WD)
Resolve rejected patches
- Accomodate changed NLS interface (util.h)
+ Accommodate changed NLS interface (util.h)
Needed to include <linux/slab.h> in most files
Makefile changes
fs/Config.in changes
diff --git a/fs/befs/befs_fs_types.h b/fs/befs/befs_fs_types.h
index 7893eaa..eb557d9 100644
--- a/fs/befs/befs_fs_types.h
+++ b/fs/befs/befs_fs_types.h
@@ -234,7 +234,7 @@ typedef struct {
} PACKED befs_btree_super;
/*
- * Header stucture of each btree node
+ * Header structure of each btree node
*/
typedef struct {
fs64 left;
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 4202db7..a66c9b1 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -5,7 +5,7 @@
*
* Licensed under the GNU GPL. See the file COPYING for details.
*
- * 2002-02-05: Sergey S. Kostyliov added binary search withing
+ * 2002-02-05: Sergey S. Kostyliov added binary search within
* btree nodes.
*
* Many thanks to:
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b1d0c79..54b8c28 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
static const struct address_space_operations befs_aops = {
.readpage = befs_readpage,
- .sync_page = block_sync_page,
.bmap = befs_bmap,
};
@@ -735,7 +734,7 @@ parse_options(char *options, befs_mount_options * opts)
/* This function has the responsibility of getting the
* filesystem ready for unmounting.
- * Basicly, we free everything that we allocated in
+ * Basically, we free everything that we allocated in
* befs_read_inode
*/
static void
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 685ecff..b14cebf 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -97,7 +97,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, int mode,
if (!inode)
return -ENOSPC;
mutex_lock(&info->bfs_lock);
- ino = find_first_zero_bit(info->si_imap, info->si_lasti);
+ ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1);
if (ino > info->si_lasti) {
mutex_unlock(&info->bfs_lock);
iput(inode);
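The bfs change is an off-by-one fix: the second argument of find_first_zero_bit() is an exclusive bit count, not the highest valid index, and the function returns that count when every scanned bit is set. A small illustration with made-up values:

	unsigned long map = 0x7;	/* inodes 0..2 in use, si_lasti == 3 */

	find_first_zero_bit(&map, 3);	/* scans bits 0..2, returns 3: looks "full" */
	find_first_zero_bit(&map, 4);	/* scans bits 0..3, returns 3: bit 3 is free */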
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index eb67edd..f20e8a7 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations bfs_aops = {
.readpage = bfs_readpage,
.writepage = bfs_writepage,
- .sync_page = block_sync_page,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
.bmap = bfs_bmap,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d5b640b..303983f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -570,7 +570,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
- unsigned long reloc_func_desc = 0;
+ unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
@@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack = bprm->p;
#ifdef arch_randomize_brk
- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+ if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+ current->brk_randomized = 1;
+#endif
+ }
#endif
if (current->personality & MMAP_PAGE_ZERO) {
@@ -1906,7 +1910,7 @@ static int elf_core_dump(struct coredump_params *cprm)
segs = current->mm->map_count;
segs += elf_core_extra_phdrs();
- gate_vma = get_gate_vma(current);
+ gate_vma = get_gate_vma(current->mm);
if (gate_vma != NULL)
segs++;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 811384b..397d305 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -717,7 +717,7 @@ static int load_flat_file(struct linux_binprm * bprm,
* help simplify all this mumbo jumbo
*
* We've got two different sections of relocation entries.
- * The first is the GOT which resides at the begining of the data segment
+ * The first is the GOT which resides at the beginning of the data segment
* and is terminated with a -1. This one can be relocated in place.
* The second is the extra relocation entries tacked after the image's
* data segment. These require a little more processing as the entry is
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index e49cce2..9c5e6b2 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -761,6 +761,9 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+ if (bs->bio_integrity_pool)
+ return 0;
+
bs->bio_integrity_pool =
mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
diff --git a/fs/bio.c b/fs/bio.c
index 4bd454f..840a0d7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -43,7 +43,7 @@ static mempool_t *bio_split_pool __read_mostly;
* unsigned short
*/
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
@@ -111,7 +111,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
if (!slab)
goto out_unlock;
- printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+ printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
bslab->slab = slab;
bslab->slab_ref = 1;
bslab->slab_size = sz;
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
* preferred way to end I/O on a bio, it takes care of clearing
* BIO_UPTODATE on error. @error is 0 on success, and one of the
* established -Exxxx (-EIO, for instance) error values in case
- * something went wrong. Noone should call bi_end_io() directly on a
+ * something went wrong. No one should call bi_end_io() directly on a
* bio unless they own it and thus know that it has an end_io
* function.
**/
@@ -1636,9 +1636,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
- if (bioset_integrity_create(bs, pool_size))
- goto bad;
-
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1656,12 +1653,10 @@ static void __init biovec_init_slabs(void)
int size;
struct biovec_slab *bvs = bvec_slabs + i;
-#ifndef CONFIG_BLK_DEV_INTEGRITY
if (bvs->nr_vecs <= BIO_INLINE_VECS) {
bvs->slab = NULL;
continue;
}
-#endif
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
@@ -1684,6 +1679,9 @@ static int __init init_bio(void)
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
+ if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+ panic("bio: can't create integrity pool\n");
+
bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
sizeof(struct bio_pair));
if (!bio_split_pool)
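With integrity-pool creation pulled out of bioset_create(), only fs_bio_set gets a pool at boot; any other bioset owner that wants integrity support opts in explicitly. A hedged sketch of that opt-in (error handling illustrative):

	struct bio_set *bs;

	bs = bioset_create(pool_size, 0);
	if (!bs)
		return -ENOMEM;

	/* safe to call even if already set up, thanks to the new guard */
	if (bioset_integrity_create(bs, pool_size)) {
		bioset_free(bs);
		return -ENOMEM;
	}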
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 8892870..5147bdd 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -55,11 +55,13 @@ EXPORT_SYMBOL(I_BDEV);
static void bdev_inode_switch_bdi(struct inode *inode,
struct backing_dev_info *dst)
{
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
inode->i_data.backing_dev_info = dst;
if (inode->i_state & I_DIRTY)
list_move(&inode->i_wb_list, &dst->wb.b_dirty);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
}
static sector_t max_block(struct block_device *bdev)
@@ -651,7 +653,7 @@ void bd_forget(struct inode *inode)
* @whole: whole block device containing @bdev, may equal @bdev
* @holder: holder trying to claim @bdev
*
- * Test whther @bdev can be claimed by @holder.
+ * Test whether @bdev can be claimed by @holder.
*
* CONTEXT:
* spin_lock(&bdev_lock).
@@ -1087,6 +1089,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (!disk)
goto out;
+ disk_block_events(disk);
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (!bdev->bd_openers) {
bdev->bd_disk = disk;
@@ -1108,10 +1111,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
*/
disk_put_part(bdev->bd_part);
bdev->bd_part = NULL;
- module_put(disk->fops->owner);
- put_disk(disk);
bdev->bd_disk = NULL;
mutex_unlock(&bdev->bd_mutex);
+ disk_unblock_events(disk);
+ module_put(disk->fops->owner);
+ put_disk(disk);
goto restart;
}
if (ret)
@@ -1148,9 +1152,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
}
} else {
- module_put(disk->fops->owner);
- put_disk(disk);
- disk = NULL;
if (bdev->bd_contains == bdev) {
if (bdev->bd_disk->fops->open) {
ret = bdev->bd_disk->fops->open(bdev, mode);
@@ -1160,11 +1161,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_invalidated)
rescan_partitions(bdev->bd_disk, bdev);
}
+ /* only one opener holds refs to the module and disk */
+ module_put(disk->fops->owner);
+ put_disk(disk);
}
bdev->bd_openers++;
if (for_part)
bdev->bd_part_count++;
mutex_unlock(&bdev->bd_mutex);
+ disk_unblock_events(disk);
return 0;
out_clear:
@@ -1177,10 +1182,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_contains = NULL;
out_unlock_bdev:
mutex_unlock(&bdev->bd_mutex);
- out:
- if (disk)
- module_put(disk->fops->owner);
+ disk_unblock_events(disk);
+ module_put(disk->fops->owner);
put_disk(disk);
+ out:
bdput(bdev);
return ret;
@@ -1446,14 +1451,13 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
if (bdev_free) {
if (bdev->bd_write_holder) {
disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
- } else
disk_check_events(bdev->bd_disk);
+ bdev->bd_write_holder = false;
+ }
}
mutex_unlock(&bdev->bd_mutex);
- } else
- disk_check_events(bdev->bd_disk);
+ }
return __blkdev_put(bdev, mode, 0);
}
@@ -1527,7 +1531,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
- .sync_page = block_sync_page,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
.writepages = generic_writepages,
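The __blkdev_get() rework brackets the whole open path with the disk-events API so a media-change poll cannot race the first opener; note that every exit, including the restart and error paths, now restores the balance. Reduced to its shape:

	disk_block_events(disk);		/* hold off media-change checking */
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	/* ... probe disk / rescan partitions / bump bd_openers ... */
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);		/* paired on success and on error */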
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 9c94934..5d505aa 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -170,7 +170,7 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
int ret;
struct posix_acl *acl = NULL;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (!IS_POSIXACL(dentry->d_inode))
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
if (value) {
acl = posix_acl_from_xattr(value, size);
- if (acl == NULL) {
- value = NULL;
- size = 0;
+ if (acl) {
+ ret = posix_acl_valid(acl);
+ if (ret)
+ goto out;
} else if (IS_ERR(acl)) {
return PTR_ERR(acl);
}
}
ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
posix_acl_release(acl);
return ret;
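The ACL change also pins down the posix_acl_from_xattr() contract: NULL means "remove the ACL", an ERR_PTR means the blob was undecodable, and a non-NULL result must still pass posix_acl_valid() (signatures as of this kernel; later kernels grow a user-namespace argument):

	acl = posix_acl_from_xattr(value, size);
	if (IS_ERR(acl))
		return PTR_ERR(acl);		/* malformed xattr blob */
	if (acl) {
		ret = posix_acl_valid(acl);	/* reject bogus entries */
		if (ret)
			goto out;
	}
	/* acl == NULL falls through and removes the ACL */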
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ccc991c..57c3bb2 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -136,9 +136,8 @@ struct btrfs_inode {
* items we think we'll end up using, and reserved_extents is the number
* of extent items we've reserved metadata for.
*/
- spinlock_t accounting_lock;
atomic_t outstanding_extents;
- int reserved_extents;
+ atomic_t reserved_extents;
/*
* ordered_data_close is set by truncate when a file that used
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4d2110e..41d1d7c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ if (!cb)
+ return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
cb->errors = 0;
cb->inode = inode;
@@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ if (!bio) {
+ kfree(cb);
+ return -ENOMEM;
+ }
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
atomic_inc(&cb->pending_bios);
@@ -657,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- btrfs_lookup_bio_sums(root, inode, comp_bio,
- sums);
+ ret = btrfs_lookup_bio_sums(root, inode,
+ comp_bio, sums);
+ BUG_ON(ret);
}
sums += (comp_bio->bi_size + root->sectorsize - 1) /
root->sectorsize;
@@ -683,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
BUG_ON(ret);
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
BUG_ON(ret);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b5baff0..84d7ca1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
- spin_lock(&root->node_lock);
- eb = root->node;
+
+ rcu_read_lock();
+ eb = rcu_dereference(root->node);
extent_buffer_get(eb);
- spin_unlock(&root->node_lock);
+ rcu_read_unlock();
return eb;
}
@@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
btrfs_tree_lock(eb);
-
- spin_lock(&root->node_lock);
- if (eb == root->node) {
- spin_unlock(&root->node_lock);
+ if (eb == root->node)
break;
- }
- spin_unlock(&root->node_lock);
-
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
}
@@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
parent_start = 0;
- spin_lock(&root->node_lock);
- root->node = cow;
extent_buffer_get(cow);
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, cow);
btrfs_free_tree_block(trans, root, buf, parent_start,
last_ref);
@@ -542,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = __btrfs_cow_block(trans, root, buf, parent,
parent_slot, cow_ret, search_start, 0);
+
+ trace_btrfs_cow_block(root, buf, *cow_ret);
+
return ret;
}
@@ -686,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (!cur) {
cur = read_tree_block(root, blocknr,
blocksize, gen);
+ if (!cur)
+ return -EIO;
} else if (!uptodate) {
btrfs_read_buffer(cur, gen);
}
@@ -732,122 +730,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root,
return btrfs_item_offset_nr(leaf, nr - 1);
}
-/*
- * extra debugging checks to make sure all the items in a key are
- * well formed and in the proper order
- */
-static int check_node(struct btrfs_root *root, struct btrfs_path *path,
- int level)
-{
- struct extent_buffer *parent = NULL;
- struct extent_buffer *node = path->nodes[level];
- struct btrfs_disk_key parent_key;
- struct btrfs_disk_key node_key;
- int parent_slot;
- int slot;
- struct btrfs_key cpukey;
- u32 nritems = btrfs_header_nritems(node);
-
- if (path->nodes[level + 1])
- parent = path->nodes[level + 1];
-
- slot = path->slots[level];
- BUG_ON(nritems == 0);
- if (parent) {
- parent_slot = path->slots[level + 1];
- btrfs_node_key(parent, &parent_key, parent_slot);
- btrfs_node_key(node, &node_key, 0);
- BUG_ON(memcmp(&parent_key, &node_key,
- sizeof(struct btrfs_disk_key)));
- BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
- btrfs_header_bytenr(node));
- }
- BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
- if (slot != 0) {
- btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
- btrfs_node_key(node, &node_key, slot);
- BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
- }
- if (slot < nritems - 1) {
- btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
- btrfs_node_key(node, &node_key, slot);
- BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
- }
- return 0;
-}
-
-/*
- * extra checking to make sure all the items in a leaf are
- * well formed and in the proper order
- */
-static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
- int level)
-{
- struct extent_buffer *leaf = path->nodes[level];
- struct extent_buffer *parent = NULL;
- int parent_slot;
- struct btrfs_key cpukey;
- struct btrfs_disk_key parent_key;
- struct btrfs_disk_key leaf_key;
- int slot = path->slots[0];
-
- u32 nritems = btrfs_header_nritems(leaf);
-
- if (path->nodes[level + 1])
- parent = path->nodes[level + 1];
-
- if (nritems == 0)
- return 0;
-
- if (parent) {
- parent_slot = path->slots[level + 1];
- btrfs_node_key(parent, &parent_key, parent_slot);
- btrfs_item_key(leaf, &leaf_key, 0);
-
- BUG_ON(memcmp(&parent_key, &leaf_key,
- sizeof(struct btrfs_disk_key)));
- BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
- btrfs_header_bytenr(leaf));
- }
- if (slot != 0 && slot < nritems - 1) {
- btrfs_item_key(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
- if (comp_keys(&leaf_key, &cpukey) <= 0) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad key\n", slot);
- BUG_ON(1);
- }
- if (btrfs_item_offset_nr(leaf, slot - 1) !=
- btrfs_item_end_nr(leaf, slot)) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad\n", slot);
- BUG_ON(1);
- }
- }
- if (slot < nritems - 1) {
- btrfs_item_key(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
- BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
- if (btrfs_item_offset_nr(leaf, slot) !=
- btrfs_item_end_nr(leaf, slot + 1)) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad\n", slot);
- BUG_ON(1);
- }
- }
- BUG_ON(btrfs_item_offset_nr(leaf, 0) +
- btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
- return 0;
-}
-
-static noinline int check_block(struct btrfs_root *root,
- struct btrfs_path *path, int level)
-{
- return 0;
- if (level == 0)
- return check_leaf(root, path, level);
- return check_node(root, path, level);
-}
/*
* search for key in the extent_buffer. The items start at offset p,
@@ -1046,9 +928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto enospc;
}
- spin_lock(&root->node_lock);
- root->node = child;
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, child);
add_root_to_dirty_list(root);
btrfs_tree_unlock(child);
@@ -1188,7 +1068,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
/* double check we haven't messed things up */
- check_block(root, path, level);
if (orig_ptr !=
btrfs_node_blockptr(path->nodes[level], path->slots[level]))
BUG();
@@ -1798,12 +1677,6 @@ cow_done:
if (!cow)
btrfs_unlock_up_safe(p, level + 1);
- ret = check_block(root, p, level);
- if (ret) {
- ret = -1;
- goto done;
- }
-
ret = bin_search(b, key, level, &slot);
if (level != 0) {
@@ -2130,10 +2003,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
- spin_lock(&root->node_lock);
old = root->node;
- root->node = c;
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, c);
/* the super has an extra ref to root->node */
free_extent_buffer(old);
@@ -3840,7 +3711,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
unsigned long ptr;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (!ret) {
leaf = path->nodes[0];
@@ -4217,6 +4089,7 @@ find_next_key:
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
+ BUG_ON(!cur);
btrfs_tree_lock(cur);
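root->node now follows the usual RCU publish/subscribe discipline instead of node_lock: writers publish under the tree lock, readers pin the buffer inside an RCU read-side section. Roughly:

	/* writer, holding the tree lock */
	rcu_assign_pointer(root->node, new_eb);

	/* reader */
	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);	/* take a reference before leaving the section */
	rcu_read_unlock();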
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7f78cc7..2e61fe1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/kobject.h>
+#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include "extent_io.h"
#include "extent_map.h"
@@ -40,6 +41,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
+extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
#define BTRFS_MAGIC "_BHRfS_M"
@@ -738,8 +740,10 @@ struct btrfs_space_info {
*/
unsigned long reservation_progress;
- int full; /* indicates that we cannot allocate any more
+ int full:1; /* indicates that we cannot allocate any more
chunks for this space */
+ int chunk_alloc:1; /* set if we are allocating a chunk */
+
int force_alloc; /* set if we need to force a chunk alloc for
this space */
@@ -782,9 +786,6 @@ struct btrfs_free_cluster {
/* first extent starting offset */
u64 window_start;
- /* if this cluster simply points at a bitmap in the block group */
- bool points_to_bitmap;
-
struct btrfs_block_group_cache *block_group;
/*
* when a cluster is allocated from a block group, we put the
@@ -1283,6 +1284,9 @@ struct btrfs_root {
#define BTRFS_INODE_NODUMP (1 << 8)
#define BTRFS_INODE_NOATIME (1 << 9)
#define BTRFS_INODE_DIRSYNC (1 << 10)
+#define BTRFS_INODE_COMPRESS (1 << 11)
+
+#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
/* some macros to generate set/get funcs for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so lets make a simple
@@ -2157,6 +2161,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve, int sinfo);
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2227,10 +2233,12 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes);
+ u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
+int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
+int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2355,6 +2363,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
+void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
@@ -2392,6 +2402,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
+int verify_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_dir_item *dir_item);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -2528,7 +2541,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
-void btrfs_orphan_cleanup(struct btrfs_root *root);
+int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
@@ -2536,7 +2549,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_cont_expand(struct inode *inode, loff_t size);
+int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
@@ -2565,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e807b14..bce28f6 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&head_ref->cluster);
mutex_init(&head_ref->mutex);
+ trace_btrfs_delayed_ref_head(ref, head_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
@@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
}
full_ref->level = level;
+ trace_btrfs_delayed_tree_ref(ref, full_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
@@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
full_ref->objectid = owner;
full_ref->offset = offset;
+ trace_btrfs_delayed_data_ref(ref, full_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f0cad5a..c62f02f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -151,7 +151,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
ret = PTR_ERR(dir_item);
if (ret == -EEXIST)
goto second_insert;
- goto out;
+ goto out_free;
}
leaf = path->nodes[0];
@@ -170,7 +170,7 @@ second_insert:
/* FIXME, use some real flag for selecting the extra index */
if (root == root->fs_info->tree_root) {
ret = 0;
- goto out;
+ goto out_free;
}
btrfs_release_path(root, path);
@@ -180,7 +180,7 @@ second_insert:
name, name_len);
if (IS_ERR(dir_item)) {
ret2 = PTR_ERR(dir_item);
- goto out;
+ goto out_free;
}
leaf = path->nodes[0];
btrfs_cpu_key_to_disk(&disk_key, location);
@@ -192,7 +192,9 @@ second_insert:
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
-out:
+
+out_free:
+
btrfs_free_path(path);
if (ret)
return ret;
@@ -377,6 +379,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, dir_item))
+ return NULL;
+
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
@@ -429,3 +434,35 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
}
return ret;
}
+
+int verify_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_dir_item *dir_item)
+{
+ u16 namelen = BTRFS_NAME_LEN;
+ u8 type = btrfs_dir_type(leaf, dir_item);
+
+ if (type >= BTRFS_FT_MAX) {
+ printk(KERN_CRIT "btrfs: invalid dir item type: %d\n",
+ (int)type);
+ return 1;
+ }
+
+ if (type == BTRFS_FT_XATTR)
+ namelen = XATTR_NAME_MAX;
+
+ if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
+ printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n",
+ (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ return 1;
+ }
+
+ /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
+ if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) {
+ printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n",
+ (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 100b07f..68c84c8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,6 +29,7 @@
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
+#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -198,7 +199,7 @@ u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
void btrfs_csum_final(u32 crc, char *result)
{
- *(__le32 *)result = ~cpu_to_le32(crc);
+ put_unaligned_le32(~crc, result);
}
/*
@@ -323,6 +324,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
int num_copies = 0;
int mirror_num = 0;
+ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, start, 1,
@@ -331,6 +333,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
!verify_parent_transid(io_tree, eb, parent_transid))
return ret;
+ /*
+ * This buffer's crc is fine, but its contents are corrupted, so
+ * there is no reason to read the other copies, they won't be
+ * any less wrong.
+ */
+ if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
+ return ret;
+
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len);
if (num_copies == 1)
@@ -419,6 +429,73 @@ static int check_tree_block_fsid(struct btrfs_root *root,
return ret;
}
+#define CORRUPT(reason, eb, root, slot) \
+ printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
+ "root=%llu, slot=%d\n", reason, \
+ (unsigned long long)btrfs_header_bytenr(eb), \
+ (unsigned long long)root->objectid, slot)
+
+static noinline int check_leaf(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ struct btrfs_key key;
+ struct btrfs_key leaf_key;
+ u32 nritems = btrfs_header_nritems(leaf);
+ int slot;
+
+ if (nritems == 0)
+ return 0;
+
+ /* Check the 0 item */
+ if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("invalid item offset size pair", leaf, root, 0);
+ return -EIO;
+ }
+
+ /*
+ * Check to make sure each item's keys are in the correct order and their
+ * offsets make sense. We only have to loop through nritems-1 because
+ * we check the current slot against the next slot, which verifies the
+ * next slot's offset+size makes sense and that the current slot's
+ * offset is correct.
+ */
+ for (slot = 0; slot < nritems - 1; slot++) {
+ btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
+ btrfs_item_key_to_cpu(leaf, &key, slot + 1);
+
+ /* Make sure the keys are in the right order */
+ if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
+ CORRUPT("bad key order", leaf, root, slot);
+ return -EIO;
+ }
+
+ /*
+ * Make sure the offsets and ends are right; remember that the
+ * item data starts at the end of the leaf and grows towards the
+ * front.
+ */
+ if (btrfs_item_offset_nr(leaf, slot) !=
+ btrfs_item_end_nr(leaf, slot + 1)) {
+ CORRUPT("slot offset bad", leaf, root, slot);
+ return -EIO;
+ }
+
+ /*
+ * Check to make sure that we don't point outside of the leaf,
+ * just in case all the items are consistent with each other, but
+ * all point outside of the leaf.
+ */
+ if (btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("slot end outside of leaf", leaf, root, slot);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
@@ -485,8 +562,20 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
btrfs_set_buffer_lockdep_class(eb, found_level);
ret = csum_tree_block(root, eb, 1);
- if (ret)
+ if (ret) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /*
+ * If this is a leaf block and it is corrupt, set the corrupt bit so
+ * that we don't try and read the other copies of this block, just
+ * return -EIO.
+ */
+ if (found_level == 0 && check_leaf(root, eb)) {
+ set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
+ }
end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
end = eb->start + end - 1;
@@ -847,7 +936,6 @@ static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
- .sync_page = block_sync_page,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
@@ -1160,7 +1248,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root, fs_info, location->objectid);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ kfree(root);
+ return ERR_PTR(-ENOMEM);
+ }
ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
if (ret == 0) {
l = path->nodes[0];
@@ -1184,8 +1275,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
out:
- if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
+ if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
+ btrfs_check_and_init_root_item(&root->root_item);
+ }
return root;
}
@@ -1331,82 +1424,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
}
/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct btrfs_device *device;
- struct btrfs_fs_info *info;
-
- info = (struct btrfs_fs_info *)bdi->unplug_io_data;
- list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
- if (!device->bdev)
- continue;
-
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
- }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct inode *inode;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
- struct address_space *mapping;
- u64 offset;
-
- /* the generic O_DIRECT read code does this */
- if (1 || !page) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- /*
- * page->mapping may change at any time. Get a consistent copy
- * and use that for everything below
- */
- smp_mb();
- mapping = page->mapping;
- if (!mapping)
- return;
-
- inode = mapping->host;
-
- /*
- * don't do the expensive searching for a small number of
- * devices
- */
- if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- offset = page_offset(page);
-
- em_tree = &BTRFS_I(inode)->extent_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
- read_unlock(&em_tree->lock);
- if (!em) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- free_extent_map(em);
- __unplug_io_fn(bdi, page);
- return;
- }
- offset = offset - em->start;
- btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
- em->block_start + offset, page);
- free_extent_map(em);
-}
-
-/*
* If this fails, caller must call bdi_destroy() to get rid of the
* bdi again.
*/
@@ -1420,8 +1437,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
return err;
bdi->ra_pages = default_backing_dev_info.ra_pages;
- bdi->unplug_io_fn = btrfs_unplug_io_fn;
- bdi->unplug_io_data = info;
bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info;
return 0;
@@ -1632,6 +1647,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
goto fail_bdi;
}
+ fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS;
+
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
@@ -1762,6 +1779,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ /*
+ * In the long term, we'll store the compression type in the super
+ * block, and it'll be used for per file compression control.
+ */
+ fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+
ret = btrfs_parse_options(tree_root, options);
if (ret) {
err = ret;
@@ -1967,6 +1990,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->metadata_alloc_profile = (u64)-1;
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+ ret = btrfs_init_space_info(fs_info);
+ if (ret) {
+ printk(KERN_ERR "Failed to initial space info: %d\n", ret);
+ goto fail_block_groups;
+ }
+
ret = btrfs_read_block_groups(extent_root);
if (ret) {
printk(KERN_ERR "Failed to read block groups: %d\n", ret);
@@ -2058,9 +2087,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY)) {
down_read(&fs_info->cleanup_work_sem);
- btrfs_orphan_cleanup(fs_info->fs_root);
- btrfs_orphan_cleanup(fs_info->tree_root);
+ err = btrfs_orphan_cleanup(fs_info->fs_root);
+ if (!err)
+ err = btrfs_orphan_cleanup(fs_info->tree_root);
up_read(&fs_info->cleanup_work_sem);
+ if (err) {
+ close_ctree(tree_root);
+ return ERR_PTR(err);
+ }
}
return tree_root;
@@ -2435,8 +2469,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
root_objectid = gang[ret - 1]->root_key.objectid + 1;
for (i = 0; i < ret; i++) {
+ int err;
+
root_objectid = gang[i]->root_key.objectid;
- btrfs_orphan_cleanup(gang[i]);
+ err = btrfs_orphan_cleanup(gang[i]);
+ if (err)
+ return err;
}
root_objectid++;
}
@@ -2947,7 +2985,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
break;
/* opt_discard */
- ret = btrfs_error_discard_extent(root, start, end + 1 - start);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_error_discard_extent(root, start,
+ end + 1 - start,
+ NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
btrfs_error_unpin_extent_range(root, start, end);
@@ -3016,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
btrfs_destroy_pinned_extent(root,
root->fs_info->pinned_extents);
- t->use_count = 0;
+ atomic_set(&t->use_count, 0);
list_del_init(&t->list);
memset(t, 0, sizeof(*t));
kmem_cache_free(btrfs_transaction_cachep, t);
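A note on btrfs_csum_final(): the result pointer lands inside an on-disk header and need not be 4-byte aligned, so the old cast-and-store could fault on strict-alignment architectures; put_unaligned_le32() compiles to byte stores where the CPU requires them:

	/* before: may trap if 'result' is misaligned */
	*(__le32 *)result = ~cpu_to_le32(crc);

	/* after: safe regardless of alignment */
	put_unaligned_le32(~crc, result);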
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b..31f33ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,11 +33,28 @@
#include "locking.h"
#include "free-space-cache.h"
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try to allocate one
+ * if we have very few chunks already allocated. This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+ CHUNK_ALLOC_NO_FORCE = 0,
+ CHUNK_ALLOC_FORCE = 1,
+ CHUNK_ALLOC_LIMITED = 2,
+};
+
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -442,7 +459,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
*/
- if (!trans->transaction->in_commit &&
+ if (trans && (!trans->transaction->in_commit) &&
(root && root != root->fs_info->tree_root)) {
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
@@ -471,7 +488,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
if (load_cache_only)
return 0;
- caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
+ caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
BUG_ON(!caching_ctl);
INIT_LIST_HEAD(&caching_ctl->list);
@@ -1740,39 +1757,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
return ret;
}
-static void btrfs_issue_discard(struct block_device *bdev,
+static int btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len)
{
- blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
+ return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes)
+ u64 num_bytes, u64 *actual_bytes)
{
int ret;
- u64 map_length = num_bytes;
+ u64 discarded_bytes = 0;
struct btrfs_multi_bio *multi = NULL;
- if (!btrfs_test_opt(root, DISCARD))
- return 0;
/* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
- bytenr, &map_length, &multi, 0);
+ ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
+ bytenr, &num_bytes, &multi, 0);
if (!ret) {
struct btrfs_bio_stripe *stripe = multi->stripes;
int i;
- if (map_length > num_bytes)
- map_length = num_bytes;
for (i = 0; i < multi->num_stripes; i++, stripe++) {
- btrfs_issue_discard(stripe->dev->bdev,
- stripe->physical,
- map_length);
+ ret = btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ stripe->length);
+ if (!ret)
+ discarded_bytes += stripe->length;
+ else if (ret != -EOPNOTSUPP)
+ break;
}
kfree(multi);
}
+ if (discarded_bytes && ret == -EOPNOTSUPP)
+ ret = 0;
+
+ if (actual_bytes)
+ *actual_bytes = discarded_bytes;
+
return ret;
}
@@ -3015,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->bytes_readonly = 0;
found->bytes_may_use = 0;
found->full = 0;
- found->force_alloc = 0;
+ found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ found->chunk_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
atomic_set(&found->caching_threads, 0);
@@ -3146,7 +3170,7 @@ again:
if (!data_sinfo->full && alloc_chunk) {
u64 alloc_target;
- data_sinfo->force_alloc = 1;
+ data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
spin_unlock(&data_sinfo->lock);
alloc:
alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3156,7 +3180,8 @@ alloc:
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
bytes + 2 * 1024 * 1024,
- alloc_target, 0);
+ alloc_target,
+ CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
if (ret < 0) {
if (ret != -ENOSPC)
@@ -3235,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
- found->force_alloc = 1;
+ found->force_alloc = CHUNK_ALLOC_FORCE;
}
rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_root *root,
- struct btrfs_space_info *sinfo, u64 alloc_bytes)
+ struct btrfs_space_info *sinfo, u64 alloc_bytes,
+ int force)
{
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+ u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
u64 thresh;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+ if (force == CHUNK_ALLOC_FORCE)
+ return 1;
+
+ /*
+ * in limited mode, we want to have some free space up to
+ * about 1% of the FS size.
+ */
+ if (force == CHUNK_ALLOC_LIMITED) {
+ thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+ thresh = max_t(u64, 64 * 1024 * 1024,
+ div_factor_fine(thresh, 1));
+
+ if (num_bytes - num_allocated < thresh)
+ return 1;
+ }
+
+ /*
+ * we have two similar checks here, one based on percentage
+ * and one based on a hard number of 256MB. The idea
+ * is that if we have a good amount of free
+ * room, don't allocate a chunk. A good amount is
+ * less than 80% of the allocated chunks utilized,
+ * or more than 256MB free
+ */
+ if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
return 0;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes < div_factor(num_bytes, 8))
+ if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
return 0;
thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+ /* 256MB or 5% of the FS */
thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
return 0;
-
return 1;
}
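For a sense of scale in the CHUNK_ALLOC_LIMITED branch: div_factor_fine(n, f) computes roughly n * f / 100, so on a 1 TiB filesystem the threshold is max(64 MiB, ~10 GiB) and a fresh chunk is allocated once less than that much raw space remains unallocated. The helper, as carried in ctree.h of this era (quoted from memory; treat as a sketch):

	static inline u64 div_factor_fine(u64 num, int factor)
	{
		num *= factor;
		do_div(num, 100);
		return num;
	}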
@@ -3269,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
{
struct btrfs_space_info *space_info;
struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ int wait_for_alloc = 0;
int ret = 0;
- mutex_lock(&fs_info->chunk_mutex);
-
flags = btrfs_reduce_alloc_profile(extent_root, flags);
space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3283,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
}
BUG_ON(!space_info);
+again:
spin_lock(&space_info->lock);
if (space_info->force_alloc)
- force = 1;
+ force = space_info->force_alloc;
if (space_info->full) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
}
- if (!force && !should_alloc_chunk(extent_root, space_info,
- alloc_bytes)) {
+ if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ wait_for_alloc = 1;
+ } else {
+ space_info->chunk_alloc = 1;
}
+
spin_unlock(&space_info->lock);
+ mutex_lock(&fs_info->chunk_mutex);
+
+ /*
+ * The chunk_mutex is held throughout the entirety of a chunk
+ * allocation, so once we've acquired the chunk_mutex we know that the
+ * other guy is done and we need to recheck and see if we should
+ * allocate.
+ */
+ if (wait_for_alloc) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ wait_for_alloc = 0;
+ goto again;
+ }
+
/*
* If we have mixed data/metadata chunks we want to make sure we keep
* allocating mixed chunks instead of individual chunks.
@@ -3323,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info->full = 1;
else
ret = 1;
- space_info->force_alloc = 0;
+
+ space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
-out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
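
A minimal pthread sketch of the serialization scheme the comment above describes, assuming the allocator holds chunk_mutex for the entire allocation and an in-progress flag guarded by a lighter lock; all names here are ours, not the kernel's.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER; /* ~space_info->lock */
static bool chunk_alloc_in_progress;
static bool space_full;

static int alloc_chunk_serialized(void)
{
again:
        pthread_mutex_lock(&info_lock);
        if (space_full) {
                pthread_mutex_unlock(&info_lock);
                return 0;
        }
        if (chunk_alloc_in_progress) {
                pthread_mutex_unlock(&info_lock);
                /* the other allocator holds chunk_mutex until it is done,
                 * so acquiring it means the allocation has finished */
                pthread_mutex_lock(&chunk_mutex);
                pthread_mutex_unlock(&chunk_mutex);
                goto again;     /* recheck: it may have filled our need */
        }
        chunk_alloc_in_progress = true;
        pthread_mutex_unlock(&info_lock);

        pthread_mutex_lock(&chunk_mutex);
        /* ... perform the actual chunk allocation here ... */
        pthread_mutex_lock(&info_lock);
        chunk_alloc_in_progress = false;
        pthread_mutex_unlock(&info_lock);
        pthread_mutex_unlock(&chunk_mutex);
        return 1;
}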
@@ -3996,6 +4065,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve;
int nr_extents;
+ int reserved_extents;
int ret;
if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4073,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
num_bytes = ALIGN(num_bytes, root->sectorsize);
- spin_lock(&BTRFS_I(inode)->accounting_lock);
nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
- if (nr_extents > BTRFS_I(inode)->reserved_extents) {
- nr_extents -= BTRFS_I(inode)->reserved_extents;
+ reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+
+ if (nr_extents > reserved_extents) {
+ nr_extents -= reserved_extents;
to_reserve = calc_trans_metadata_size(root, nr_extents);
} else {
nr_extents = 0;
to_reserve = 0;
}
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
+
to_reserve += calc_csum_metadata_size(inode, num_bytes);
ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
if (ret)
return ret;
- spin_lock(&BTRFS_I(inode)->accounting_lock);
- BTRFS_I(inode)->reserved_extents += nr_extents;
+ atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
atomic_inc(&BTRFS_I(inode)->outstanding_extents);
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
block_rsv_add_bytes(block_rsv, to_reserve, 1);
@@ -4036,20 +4105,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 to_free;
int nr_extents;
+ int reserved_extents;
num_bytes = ALIGN(num_bytes, root->sectorsize);
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
- spin_lock(&BTRFS_I(inode)->accounting_lock);
- nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
- if (nr_extents < BTRFS_I(inode)->reserved_extents) {
- nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
- BTRFS_I(inode)->reserved_extents -= nr_extents;
- } else {
- nr_extents = 0;
- }
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
+ reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+ do {
+ int old, new;
+
+ nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
+ if (nr_extents >= reserved_extents) {
+ nr_extents = 0;
+ break;
+ }
+ old = reserved_extents;
+ nr_extents = reserved_extents - nr_extents;
+ new = reserved_extents - nr_extents;
+ old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
+ reserved_extents, new);
+ if (likely(old == reserved_extents))
+ break;
+ reserved_extents = old;
+ } while (1);
to_free = calc_csum_metadata_size(inode, num_bytes);
if (nr_extents > 0)
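
The release path above trades the old accounting_lock for a lock-free retry loop. A compact C11 restatement of that cmpxchg idiom, with hypothetical counters standing in for the inode fields:

#include <stdatomic.h>

/* Work out how many reserved extents exceed the outstanding count and try
 * to publish the reduced value; if another task raced us, cur is refreshed
 * by the failed compare-exchange and we simply try again. Returns the
 * number of reservations we now own and may free. */
static int release_excess(atomic_int *reserved, atomic_int *outstanding)
{
        int cur = atomic_load(reserved);

        for (;;) {
                int out = atomic_load(outstanding);

                if (out >= cur)
                        return 0;       /* nothing over-reserved */
                if (atomic_compare_exchange_weak(reserved, &cur, out))
                        return cur - out;
        }
}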
@@ -4223,8 +4302,8 @@ int btrfs_pin_extent(struct btrfs_root *root,
* update size of reserved extents. this function may return -EAGAIN
* if 'reserve' is true or 'sinfo' is false.
*/
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve, int sinfo)
+int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve, int sinfo)
{
int ret = 0;
if (sinfo) {
@@ -4363,7 +4442,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
if (ret)
break;
- ret = btrfs_discard_extent(root, start, end + 1 - start);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start,
+ end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
unpin_extent_range(root, start, end);
@@ -4704,10 +4785,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
btrfs_add_free_space(cache, buf->start, buf->len);
- ret = update_reserved_bytes(cache, buf->len, 0, 0);
+ ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
if (ret == -EAGAIN) {
/* block group became read-only */
- update_reserved_bytes(cache, buf->len, 0, 1);
+ btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
goto out;
}
@@ -4744,6 +4825,11 @@ pin:
}
}
out:
+ /*
+	 * We are deleting the buffer, so clear the corrupt flag since it
+	 * doesn't matter anymore.
+ */
+ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
btrfs_put_block_group(cache);
}
@@ -5191,7 +5277,7 @@ checks:
search_start - offset);
BUG_ON(offset > search_start);
- ret = update_reserved_bytes(block_group, num_bytes, 1,
+ ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
(data & BTRFS_BLOCK_GROUP_DATA));
if (ret == -EAGAIN) {
btrfs_add_free_space(block_group, offset, num_bytes);
@@ -5282,11 +5368,13 @@ loop:
if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data, 1);
+ 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_LIMITED);
allowed_chunk_alloc = 0;
done_chunk_alloc = 1;
- } else if (!done_chunk_alloc) {
- space_info->force_alloc = 1;
+ } else if (!done_chunk_alloc &&
+ space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+ space_info->force_alloc = CHUNK_ALLOC_LIMITED;
}
if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5372,7 +5460,8 @@ again:
*/
if (empty_size || root->ref_cows)
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes + 2 * 1024 * 1024, data, 0);
+ num_bytes + 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_NO_FORCE);
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5384,7 +5473,7 @@ again:
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes, data, 1);
+ num_bytes, data, CHUNK_ALLOC_FORCE);
goto again;
}
if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -5397,6 +5486,8 @@ again:
dump_space_info(sinfo, num_bytes, 1);
}
+ trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
+
return ret;
}
@@ -5412,12 +5503,15 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
return -ENOSPC;
}
- ret = btrfs_discard_extent(root, start, len);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
- update_reserved_bytes(cache, len, 0, 1);
+ btrfs_update_reserved_bytes(cache, len, 0, 1);
btrfs_put_block_group(cache);
+ trace_btrfs_reserved_extent_free(root, start, len);
+
return ret;
}
@@ -5444,7 +5538,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -5614,7 +5709,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
put_caching_control(caching_ctl);
}
- ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
+ ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
BUG_ON(ret);
btrfs_put_block_group(block_group);
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@ -6047,6 +6142,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
next = read_tree_block(root, bytenr, blocksize, generation);
+ if (!next)
+ return -EIO;
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
}
@@ -6438,10 +6535,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- BUG_ON(!wc);
+ if (!wc) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
@@ -6899,7 +7000,11 @@ static noinline int get_new_locations(struct inode *reloc_inode,
}
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ if (exts != *extents)
+ kfree(exts);
+ return -ENOMEM;
+ }
cur_pos = extent_key->objectid - offset;
last_byte = extent_key->objectid + extent_key->offset;
@@ -6941,6 +7046,10 @@ static noinline int get_new_locations(struct inode *reloc_inode,
struct disk_extent *old = exts;
max *= 2;
exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto out;
+ }
memcpy(exts, old, sizeof(*exts) * nr);
if (old != *extents)
kfree(old);
@@ -7423,7 +7532,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
int ret;
new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
- BUG_ON(!new_extent);
+ if (!new_extent)
+ return -ENOMEM;
ref = btrfs_lookup_leaf_ref(root, leaf->start);
BUG_ON(!ref);
@@ -7609,7 +7719,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
BUG_ON(!reloc_root);
- btrfs_orphan_cleanup(reloc_root);
+ ret = btrfs_orphan_cleanup(reloc_root);
+ BUG_ON(ret);
return 0;
}
@@ -7627,7 +7738,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
return 0;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
- BUG_ON(!root_item);
+ if (!root_item)
+ return -ENOMEM;
ret = btrfs_copy_root(trans, root, root->commit_root,
&eb, BTRFS_TREE_RELOC_OBJECTID);
@@ -7653,7 +7765,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
&root_key);
- BUG_ON(!reloc_root);
+ BUG_ON(IS_ERR(reloc_root));
reloc_root->last_trans = trans->transid;
reloc_root->commit_root = NULL;
reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
@@ -7906,6 +8018,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
eb = read_tree_block(found_root, block_start,
block_size, 0);
+ if (!eb) {
+ ret = -EIO;
+ goto out;
+ }
btrfs_tree_lock(eb);
BUG_ON(level != btrfs_header_level(eb));
@@ -8061,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
alloc_flags = update_block_group_flags(root, cache->flags);
if (alloc_flags != cache->flags)
- do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
ret = set_block_group_ro(cache);
if (!ret)
goto out;
alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = set_block_group_ro(cache);
@@ -8080,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type)
{
u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
}
/*
@@ -8621,6 +8740,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
BUG_ON(!block_group);
BUG_ON(!block_group->ro);
+ /*
+ * Free the reserved super bytes from this block group before
+	 * removing it.
+ */
+ free_excluded_extents(root, block_group);
+
memcpy(&key, &block_group->key, sizeof(key));
if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
@@ -8724,13 +8849,84 @@ out:
return ret;
}
+int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *space_info;
+ int ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
return unpin_extent_range(root, start, end);
}
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes)
+ u64 num_bytes, u64 *actual_bytes)
+{
+ return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
+}
+
+int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
- return btrfs_discard_extent(root, bytenr, num_bytes);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_group_cache *cache = NULL;
+ u64 group_trimmed;
+ u64 start;
+ u64 end;
+ u64 trimmed = 0;
+ int ret = 0;
+
+ cache = btrfs_lookup_block_group(fs_info, range->start);
+
+ while (cache) {
+ if (cache->key.objectid >= (range->start + range->len)) {
+ btrfs_put_block_group(cache);
+ break;
+ }
+
+ start = max(range->start, cache->key.objectid);
+ end = min(range->start + range->len,
+ cache->key.objectid + cache->key.offset);
+
+ if (end - start >= range->minlen) {
+ if (!block_group_cache_done(cache)) {
+ ret = cache_block_group(cache, NULL, root, 0);
+ if (!ret)
+ wait_block_group_cache_done(cache);
+ }
+ ret = btrfs_trim_block_group(cache,
+ &group_trimmed,
+ start,
+ end,
+ range->minlen);
+
+ trimmed += group_trimmed;
+ if (ret) {
+ btrfs_put_block_group(cache);
+ break;
+ }
+ }
+
+ cache = next_block_group(fs_info->tree_root, cache);
+ }
+
+ range->len = trimmed;
+ return ret;
}
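
btrfs_trim_fs() above clamps the caller's fstrim window to each block group it visits and skips intersections smaller than minlen. A small sketch of just that clamping step, assuming each group is described by a (start, size) pair; the names are illustrative.

#include <stdint.h>

struct group {
        uint64_t start;
        uint64_t size;
};

/* Intersect [rstart, rstart+rlen) with the group. Returns nonzero (and
 * fills *start/*end) only when the overlap is at least minlen bytes. */
static int clamp_trim_window(const struct group *g, uint64_t rstart,
                             uint64_t rlen, uint64_t minlen,
                             uint64_t *start, uint64_t *end)
{
        uint64_t gend = g->start + g->size;

        *start = rstart > g->start ? rstart : g->start;
        *end = rstart + rlen < gend ? rstart + rlen : gend;
        return *end > *start && *end - *start >= minlen;
}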
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 714adc4..3151386 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
}
}
+static void uncache_state(struct extent_state **cached_ptr)
+{
+ if (cached_ptr && (*cached_ptr)) {
+ struct extent_state *state = *cached_ptr;
+ *cached_ptr = NULL;
+ free_extent_state(state);
+ }
+}
+
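
uncache_state() above is the usual take-and-clear idiom for a cached reference: steal the pointer first so the reference cannot be dropped twice. A self-contained toy version, purely illustrative:

#include <stdlib.h>

struct state {
        int refs;
};

static void put_state(struct state *s)
{
        if (--s->refs == 0)
                free(s);
}

/* Clear the caller's cached pointer before dropping the reference. */
static void uncache(struct state **cached)
{
        if (cached && *cached) {
                struct state *s = *cached;

                *cached = NULL;
                put_state(s);
        }
}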
/*
* set some bits on a range in the tree. This may require allocations or
* sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
}
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+ struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+ NULL, cached_state, mask);
}
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
mask);
}
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
+ struct extent_state *state;
+
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (++bvec <= bvec_end)
prefetchw(&bvec->bv_page->flags);
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+ if (state && state->start == start) {
+ /*
+ * take a reference on the state, unlock will drop
+ * the ref
+ */
+ cache_state(state, &cached);
+ }
+ spin_unlock(&tree->lock);
+
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
- NULL);
+ state);
if (ret)
uptodate = 0;
}
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
uptodate = 0;
+ uncache_state(&cached);
continue;
}
}
if (uptodate) {
- set_extent_uptodate(tree, start, end,
+ set_extent_uptodate(tree, start, end, &cached,
GFP_ATOMIC);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
if (whole_page) {
if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags);
if (uptodate) {
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+ set_extent_uptodate(tree, start, end, &cached,
+ GFP_ATOMIC);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
} while (bvec >= bio->bi_io_vec);
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
while (cur <= end) {
if (cur >= last_byte) {
char *userpage;
+ struct extent_state *cached = NULL;
+
iosize = PAGE_CACHE_SIZE - page_offset;
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
break;
}
em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
char *userpage;
+ struct extent_state *cached = NULL;
+
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
cur = cur + iosize;
page_offset += iosize;
continue;
@@ -2188,10 +2219,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
- write_flags = WRITE_SYNC_PLUG;
+ write_flags = WRITE_SYNC;
else
write_flags = WRITE;
+ trace___extent_writepage(page, inode, wbc);
+
WARN_ON(!PageLocked(page));
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
@@ -2787,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
iocount++;
block_start = block_start + iosize;
} else {
- set_extent_uptodate(tree, block_start, cur_end,
+ struct extent_state *cached = NULL;
+
+ set_extent_uptodate(tree, block_start, cur_end, &cached,
GFP_NOFS);
- unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+ unlock_extent_cached(tree, block_start, cur_end,
+ &cached, GFP_NOFS);
block_start = cur_end + 1;
}
page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3455,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
num_pages = num_extent_pages(eb->start, eb->len);
set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- GFP_NOFS);
+ NULL, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3690,6 +3726,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
"wanted %lu %lu\n", (unsigned long long)eb->start,
eb->len, start, min_len);
WARN_ON(1);
+ return -EINVAL;
}
p = extent_buffer_page(eb, i);
@@ -3882,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
kunmap_atomic(dst_kaddr, KM_USER0);
}
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+ unsigned long distance = (src > dst) ? src - dst : dst - src;
+ return distance < len;
+}
+
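
The new areas_overlap() helper gates which copy primitive is safe in copy_pages() and memmove_extent_buffer(). A user-space restatement, assuming both windows live in a single buffer:

#include <stdbool.h>
#include <string.h>

/* Same distance test as the helper above. */
static bool areas_overlap(unsigned long src, unsigned long dst,
                          unsigned long len)
{
        unsigned long distance = src > dst ? src - dst : dst - src;

        return distance < len;
}

/* memcpy is fine for disjoint windows; overlapping ones need memmove,
 * which copies as if through a temporary buffer. */
static void copy_within(char *base, unsigned long dst_off,
                        unsigned long src_off, unsigned long len)
{
        if (!areas_overlap(src_off, dst_off, len))
                memcpy(base + dst_off, base + src_off, len);
        else
                memmove(base + dst_off, base + src_off, len);
}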
static void copy_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
unsigned long len)
@@ -3889,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
char *src_kaddr;
- if (dst_page != src_page)
+ if (dst_page != src_page) {
src_kaddr = kmap_atomic(src_page, KM_USER1);
- else
+ } else {
src_kaddr = dst_kaddr;
+ BUG_ON(areas_overlap(src_off, dst_off, len));
+ }
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3967,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
- if (dst_offset < src_offset) {
+ if (!areas_overlap(src_offset, dst_offset, len)) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 9318dfe..af2d717 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -31,6 +31,7 @@
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_BLOCKING 1
#define EXTENT_BUFFER_DIRTY 2
+#define EXTENT_BUFFER_CORRUPT 3
/* these are flags for extent_clear_unlock_delalloc */
#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -207,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int exclusive_bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
+ struct extent_state **cached_state, gfp_t mask);
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2b6c12e..a24a3f2 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -243,7 +243,7 @@ out:
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
* into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was successfull.
+ * reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 4f19a3e..a6a9d4e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -48,7 +48,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
file_key.objectid = objectid;
file_key.offset = pos;
btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
@@ -169,6 +170,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
if (bio->bi_size > PAGE_CACHE_SIZE * 8)
path->reada = 2;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f447b78..75899a0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -45,14 +45,14 @@
* and be replaced with calls into generic code.
*/
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
- int write_bytes,
+ size_t write_bytes,
struct page **prepared_pages,
struct iov_iter *i)
{
size_t copied = 0;
+ size_t total_copied = 0;
int pg = 0;
int offset = pos & (PAGE_CACHE_SIZE - 1);
- int total_copied = 0;
while (write_bytes > 0) {
size_t count = min_t(size_t,
@@ -88,9 +88,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
total_copied += copied;
/* Return to btrfs_file_aio_write to fault page */
- if (unlikely(copied == 0)) {
+ if (unlikely(copied == 0))
break;
- }
if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
offset += copied;
@@ -105,12 +104,10 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
/*
* unlocks pages after btrfs_file_write is done with them
*/
-static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
- if (!pages[i])
- break;
/* page checked is some magic around finding pages that
* have been modified without going through btrfs_set_page_dirty
* clear it here
@@ -130,17 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct file *file,
- struct page **pages,
- size_t num_pages,
- loff_t pos,
- size_t write_bytes)
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
int err = 0;
int i;
- struct inode *inode = fdentry(file)->d_inode;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
@@ -153,8 +146,9 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- NULL);
- BUG_ON(err);
+ cached);
+ if (err)
+ return err;
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
@@ -162,13 +156,14 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
ClearPageChecked(p);
set_page_dirty(p);
}
- if (end_pos > isize) {
+
+ /*
+ * we've only changed i_size in ram, and we haven't updated
+ * the disk i_size. There is no need to log the inode
+ * at this time.
+ */
+ if (end_pos > isize)
i_size_write(inode, end_pos);
- /* we've only changed i_size in ram, and we haven't updated
- * the disk i_size. There is no need to log the inode
- * at this time.
- */
- }
return 0;
}
@@ -610,6 +605,8 @@ again:
key.offset = split;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
if (ret > 0 && path->slots[0] > 0)
path->slots[0]--;
@@ -819,12 +816,11 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
if (start_pos > inode->i_size) {
- err = btrfs_cont_expand(inode, start_pos);
+ err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
if (err)
return err;
}
- memset(pages, 0, num_pages * sizeof(struct page *));
again:
for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i);
@@ -896,156 +892,71 @@ fail:
}
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static noinline ssize_t __btrfs_buffered_write(struct file *file,
+ struct iov_iter *i,
+ loff_t pos)
{
- struct file *file = iocb->ki_filp;
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
- struct iov_iter i;
- loff_t *ppos = &iocb->ki_pos;
- loff_t start_pos;
- ssize_t num_written = 0;
- ssize_t err = 0;
- size_t count;
- size_t ocount;
- int ret = 0;
- int nrptrs;
unsigned long first_index;
unsigned long last_index;
- int will_write;
- int buffered = 0;
- int copied = 0;
- int dirty_pages = 0;
-
- will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
- (file->f_flags & O_DIRECT));
-
- start_pos = pos;
-
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
- mutex_lock(&inode->i_mutex);
-
- err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
- if (err)
- goto out;
- count = ocount;
-
- current->backing_dev_info = inode->i_mapping->backing_dev_info;
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
- goto out;
-
- if (count == 0)
- goto out;
-
- err = file_remove_suid(file);
- if (err)
- goto out;
-
- /*
- * If BTRFS flips readonly due to some impossible error
- * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
- * although we have opened a file as writable, we have
- * to stop this write operation to ensure FS consistency.
- */
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- err = -EROFS;
- goto out;
- }
-
- file_update_time(file);
- BTRFS_I(inode)->sequence++;
-
- if (unlikely(file->f_flags & O_DIRECT)) {
- num_written = generic_file_direct_write(iocb, iov, &nr_segs,
- pos, ppos, count,
- ocount);
- /*
- * the generic O_DIRECT will update in-memory i_size after the
- * DIOs are done. But our endio handlers that update the on
- * disk i_size never update past the in memory i_size. So we
- * need one more update here to catch any additions to the
- * file
- */
- if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
- btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
- mark_inode_dirty(inode);
- }
-
- if (num_written < 0) {
- ret = num_written;
- num_written = 0;
- goto out;
- } else if (num_written == count) {
- /* pick up pos changes done by the generic code */
- pos = *ppos;
- goto out;
- }
- /*
- * We are going to do buffered for the rest of the range, so we
- * need to make sure to invalidate the buffered pages when we're
- * done.
- */
- buffered = 1;
- pos += num_written;
- }
+ size_t num_written = 0;
+ int nrptrs;
+ int ret = 0;
- iov_iter_init(&i, iov, nr_segs, count, num_written);
- nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
+ nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* generic_write_checks can change our pos */
- start_pos = pos;
+ if (!pages)
+ return -ENOMEM;
first_index = pos >> PAGE_CACHE_SHIFT;
- last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
+ last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
- while (iov_iter_count(&i) > 0) {
+ while (iov_iter_count(i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1);
- size_t write_bytes = min(iov_iter_count(&i),
+ size_t write_bytes = min(iov_iter_count(i),
nrptrs * (size_t)PAGE_CACHE_SIZE -
offset);
size_t num_pages = (write_bytes + offset +
PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ size_t dirty_pages;
+ size_t copied;
WARN_ON(num_pages > nrptrs);
- memset(pages, 0, sizeof(struct page *) * nrptrs);
/*
* Fault pages before locking them in prepare_pages
* to avoid recursive lock
*/
- if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
+ if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
ret = -EFAULT;
- goto out;
+ break;
}
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
if (ret)
- goto out;
+ break;
+ /*
+		 * This is going to set up the pages array with the number of
+ * pages we want, so we don't really need to worry about the
+ * contents of pages from loop to loop
+ */
ret = prepare_pages(root, file, pages, num_pages,
pos, first_index, last_index,
write_bytes);
if (ret) {
btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT);
- goto out;
+ break;
}
copied = btrfs_copy_from_user(pos, num_pages,
- write_bytes, pages, &i);
+ write_bytes, pages, i);
/*
* if we have trouble faulting in the pages, fall
@@ -1061,6 +972,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
+ /*
+		 * If we had a short copy we need to release the excess delalloc
+ * bytes we reserved. We need to increment outstanding_extents
+ * because btrfs_delalloc_release_space will decrement it, but
+ * we still have an outstanding extent for the chunk we actually
+ * managed to copy.
+ */
if (num_pages > dirty_pages) {
if (copied > 0)
atomic_inc(
@@ -1071,39 +989,157 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
}
if (copied > 0) {
- dirty_and_release_pages(NULL, root, file, pages,
- dirty_pages, pos, copied);
+ ret = btrfs_dirty_pages(root, inode, pages,
+ dirty_pages, pos, copied,
+ NULL);
+ if (ret) {
+ btrfs_delalloc_release_space(inode,
+ dirty_pages << PAGE_CACHE_SHIFT);
+ btrfs_drop_pages(pages, num_pages);
+ break;
+ }
}
btrfs_drop_pages(pages, num_pages);
- if (copied > 0) {
- if (will_write) {
- filemap_fdatawrite_range(inode->i_mapping, pos,
- pos + copied - 1);
- } else {
- balance_dirty_pages_ratelimited_nr(
- inode->i_mapping,
- dirty_pages);
- if (dirty_pages <
- (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root, 1);
- btrfs_throttle(root);
- }
- }
+ cond_resched();
+
+ balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+ dirty_pages);
+ if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+ btrfs_btree_balance_dirty(root, 1);
+ btrfs_throttle(root);
pos += copied;
num_written += copied;
+ }
- cond_resched();
+ kfree(pages);
+
+ return num_written ? num_written : ret;
+}
+
+static ssize_t __btrfs_direct_write(struct kiocb *iocb,
+ const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos,
+ loff_t *ppos, size_t count, size_t ocount)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct iov_iter i;
+ ssize_t written;
+ ssize_t written_buffered;
+ loff_t endbyte;
+ int err;
+
+ written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
+ count, ocount);
+
+ /*
+ * the generic O_DIRECT will update in-memory i_size after the
+ * DIOs are done. But our endio handlers that update the on
+ * disk i_size never update past the in memory i_size. So we
+ * need one more update here to catch any additions to the
+ * file
+ */
+ if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ mark_inode_dirty(inode);
}
+
+ if (written < 0 || written == count)
+ return written;
+
+ pos += written;
+ count -= written;
+ iov_iter_init(&i, iov, nr_segs, count, written);
+ written_buffered = __btrfs_buffered_write(file, &i, pos);
+ if (written_buffered < 0) {
+ err = written_buffered;
+ goto out;
+ }
+ endbyte = pos + written_buffered - 1;
+ err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+ if (err)
+ goto out;
+ written += written_buffered;
+ *ppos = pos + written_buffered;
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
+ endbyte >> PAGE_CACHE_SHIFT);
out:
- mutex_unlock(&inode->i_mutex);
- if (ret)
- err = ret;
+ return written ? written : err;
+}
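
__btrfs_direct_write() above folds the two phases into one return value: the direct bytes plus whatever the buffered fallback then managed, or the first error only if nothing at all was written. A schematic sketch of that folding; the callback is a placeholder, not a kernel call.

#include <sys/types.h>

static ssize_t write_with_fallback(ssize_t direct_ret, size_t count,
                                   off_t pos,
                                   ssize_t (*buffered)(off_t, size_t))
{
        ssize_t written = direct_ret;
        ssize_t more;

        if (written < 0 || (size_t)written == count)
                return written;         /* hard error or full success */

        more = buffered(pos + written, count - written);
        if (more < 0)
                return written ? written : more;
        return written + more;
}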
- kfree(pages);
- *ppos = pos;
+static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
+ const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ loff_t *ppos = &iocb->ki_pos;
+ ssize_t num_written = 0;
+ ssize_t err = 0;
+ size_t count, ocount;
+
+ vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+ mutex_lock(&inode->i_mutex);
+
+ err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+ count = ocount;
+
+ current->backing_dev_info = inode->i_mapping->backing_dev_info;
+ err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ if (count == 0) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ err = file_remove_suid(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ /*
+ * If BTRFS flips readonly due to some impossible error
+ * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
+ * although we have opened a file as writable, we have
+ * to stop this write operation to ensure FS consistency.
+ */
+ if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ mutex_unlock(&inode->i_mutex);
+ err = -EROFS;
+ goto out;
+ }
+
+ file_update_time(file);
+ BTRFS_I(inode)->sequence++;
+
+ if (unlikely(file->f_flags & O_DIRECT)) {
+ num_written = __btrfs_direct_write(iocb, iov, nr_segs,
+ pos, ppos, count, ocount);
+ } else {
+ struct iov_iter i;
+
+ iov_iter_init(&i, iov, nr_segs, count, num_written);
+
+ num_written = __btrfs_buffered_write(file, &i, pos);
+ if (num_written > 0)
+ *ppos = pos + num_written;
+ }
+
+ mutex_unlock(&inode->i_mutex);
/*
* we want to make sure fsync finds this change
@@ -1118,43 +1154,12 @@ out:
* one running right now.
*/
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
-
- if (num_written > 0 && will_write) {
- struct btrfs_trans_handle *trans;
-
- err = btrfs_wait_ordered_range(inode, start_pos, num_written);
- if (err)
+ if (num_written > 0 || num_written == -EIOCBQUEUED) {
+ err = generic_write_sync(file, pos, num_written);
+ if (err < 0 && num_written > 0)
num_written = err;
-
- if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- num_written = PTR_ERR(trans);
- goto done;
- }
- mutex_lock(&inode->i_mutex);
- ret = btrfs_log_dentry_safe(trans, root,
- file->f_dentry);
- mutex_unlock(&inode->i_mutex);
- if (ret == 0) {
- ret = btrfs_sync_log(trans, root);
- if (ret == 0)
- btrfs_end_transaction(trans, root);
- else
- btrfs_commit_transaction(trans, root);
- } else if (ret != BTRFS_NO_LOG_SYNC) {
- btrfs_commit_transaction(trans, root);
- } else {
- btrfs_end_transaction(trans, root);
- }
- }
- if (file->f_flags & O_DIRECT && buffered) {
- invalidate_mapping_pages(inode->i_mapping,
- start_pos >> PAGE_CACHE_SHIFT,
- (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
- }
}
-done:
+out:
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
@@ -1197,6 +1202,7 @@ int btrfs_sync_file(struct file *file, int datasync)
int ret = 0;
struct btrfs_trans_handle *trans;
+ trace_btrfs_sync_file(file, datasync);
/* we wait first, since the writeback may change the inode */
root->log_batch++;
@@ -1324,7 +1330,8 @@ static long btrfs_fallocate(struct file *file, int mode,
goto out;
if (alloc_start > inode->i_size) {
- ret = btrfs_cont_expand(inode, alloc_start);
+ ret = btrfs_cont_expand(inode, i_size_read(inode),
+ alloc_start);
if (ret)
goto out;
}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a039065..11d2e9c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -24,6 +24,7 @@
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
+#include "extent_io.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
@@ -81,6 +82,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
return ERR_PTR(-ENOENT);
}
+ inode->i_mapping->flags &= ~__GFP_FS;
+
spin_lock(&block_group->lock);
if (!root->fs_info->closing) {
block_group->inode = igrab(inode);
@@ -222,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
u64 num_entries;
u64 num_bitmaps;
u64 generation;
+ u64 used = btrfs_block_group_used(&block_group->item);
u32 cur_crc = ~(u32)0;
pgoff_t index = 0;
unsigned long first_page_offset;
@@ -393,7 +397,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
break;
need_loop = 1;
- e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+ e = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
if (!e) {
kunmap(page);
unlock_page(page);
@@ -405,7 +410,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
e->bytes = le64_to_cpu(entry->bytes);
if (!e->bytes) {
kunmap(page);
- kfree(e);
+ kmem_cache_free(btrfs_free_space_cachep, e);
unlock_page(page);
page_cache_release(page);
goto free_cache;
@@ -420,7 +425,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!e->bitmap) {
kunmap(page);
- kfree(e);
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
unlock_page(page);
page_cache_release(page);
goto free_cache;
@@ -465,6 +471,17 @@ next:
index++;
}
+ spin_lock(&block_group->tree_lock);
+ if (block_group->free_space != (block_group->key.offset - used -
+ block_group->bytes_super)) {
+ spin_unlock(&block_group->tree_lock);
+		printk(KERN_ERR "block group %llu has the wrong amount of free "
+ "space\n", block_group->key.objectid);
+ ret = 0;
+ goto free_cache;
+ }
+ spin_unlock(&block_group->tree_lock);
+
ret = 1;
out:
kfree(checksums);
@@ -491,18 +508,23 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct inode *inode;
struct rb_node *node;
struct list_head *pos, *n;
+ struct page **pages;
struct page *page;
struct extent_state *cached_state = NULL;
+ struct btrfs_free_cluster *cluster = NULL;
+ struct extent_io_tree *unpin = NULL;
struct list_head bitmap_list;
struct btrfs_key key;
+ u64 start, end, len;
u64 bytes = 0;
u32 *crc, *checksums;
- pgoff_t index = 0, last_index = 0;
unsigned long first_page_offset;
- int num_checksums;
+ int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
int ret = 0;
+ bool next_page = false;
+ bool out_of_space = false;
root = root->fs_info->tree_root;
@@ -530,24 +552,43 @@ int btrfs_write_out_cache(struct btrfs_root *root,
return 0;
}
- last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
filemap_write_and_wait(inode->i_mapping);
btrfs_wait_ordered_range(inode, inode->i_size &
~(root->sectorsize - 1), (u64)-1);
/* We need a checksum per page. */
- num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
- crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
+ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
if (!crc) {
iput(inode);
return 0;
}
+ pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+ if (!pages) {
+ kfree(crc);
+ iput(inode);
+ return 0;
+ }
+
/* Since the first page has all of our checksums and our generation we
* need to calculate the offset into the page that we can start writing
* our entries.
*/
- first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+ first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
+
+ /* Get the cluster for this block_group if it exists */
+ if (!list_empty(&block_group->cluster_list))
+ cluster = list_entry(block_group->cluster_list.next,
+ struct btrfs_free_cluster,
+ block_group_list);
+
+ /*
+ * We shouldn't have switched the pinned extents yet so this is the
+ * right one
+ */
+ unpin = root->fs_info->pinned_extents;
/*
* Lock all pages first so we can lock the extent safely.
@@ -557,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
* after find_get_page at this point. Just putting this here so people
* know and don't freak out.
*/
- while (index <= last_index) {
+ while (index < num_pages) {
page = grab_cache_page(inode->i_mapping, index);
if (!page) {
- pgoff_t i = 0;
+ int i;
- while (i < index) {
- page = find_get_page(inode->i_mapping, i);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
- i++;
+ for (i = 0; i < num_pages; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
}
goto out_free;
}
+ pages[index] = page;
index++;
}
@@ -578,6 +617,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state, GFP_NOFS);
+ /*
+ * When searching for pinned extents, we need to start at our start
+ * offset.
+ */
+ start = block_group->key.objectid;
+
/* Write out the extent entries */
do {
struct btrfs_free_space_entry *entry;
@@ -585,18 +630,25 @@ int btrfs_write_out_cache(struct btrfs_root *root,
unsigned long offset = 0;
unsigned long start_offset = 0;
+ next_page = false;
+
if (index == 0) {
start_offset = first_page_offset;
offset = start_offset;
}
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+
+ page = pages[index];
addr = kmap(page);
entry = addr + start_offset;
memset(addr, 0, PAGE_CACHE_SIZE);
- while (1) {
+ while (node && !next_page) {
struct btrfs_free_space *e;
e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -612,12 +664,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
entry->type = BTRFS_FREE_SPACE_EXTENT;
}
node = rb_next(node);
- if (!node)
- break;
+ if (!node && cluster) {
+ node = rb_first(&cluster->root);
+ cluster = NULL;
+ }
offset += sizeof(struct btrfs_free_space_entry);
if (offset + sizeof(struct btrfs_free_space_entry) >=
PAGE_CACHE_SIZE)
+ next_page = true;
+ entry++;
+ }
+
+ /*
+ * We want to add any pinned extents to our free space cache
+ * so we don't leak the space
+ */
+ while (!next_page && (start < block_group->key.objectid +
+ block_group->key.offset)) {
+ ret = find_first_extent_bit(unpin, start, &start, &end,
+ EXTENT_DIRTY);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+
+ /* This pinned extent is out of our range */
+ if (start >= block_group->key.objectid +
+ block_group->key.offset)
break;
+
+ len = block_group->key.objectid +
+ block_group->key.offset - start;
+ len = min(len, end + 1 - start);
+
+ entries++;
+ entry->offset = cpu_to_le64(start);
+ entry->bytes = cpu_to_le64(len);
+ entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+ start = end + 1;
+ offset += sizeof(struct btrfs_free_space_entry);
+ if (offset + sizeof(struct btrfs_free_space_entry) >=
+ PAGE_CACHE_SIZE)
+ next_page = true;
entry++;
}
*crc = ~(u32)0;
@@ -630,25 +719,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
-
- /*
- * We need to release our reference we got for grab_cache_page,
- * except for the first page which will hold our checksums, we
- * do that below.
- */
- if (index != 0) {
- unlock_page(page);
- page_cache_release(page);
- }
-
- page_cache_release(page);
-
index++;
- } while (node);
+ } while (node || next_page);
/* Write out the bitmaps */
list_for_each_safe(pos, n, &bitmap_list) {
@@ -656,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_free_space *entry =
list_entry(pos, struct btrfs_free_space, list);
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+ page = pages[index];
addr = kmap(page);
memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -667,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
crc++;
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
list_del_init(&entry->list);
index++;
}
+ if (out_of_space) {
+ btrfs_drop_pages(pages, num_pages);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, &cached_state,
+ GFP_NOFS);
+ ret = 0;
+ goto out_free;
+ }
+
/* Zero out the rest of the pages just to make sure */
- while (index <= last_index) {
+ while (index < num_pages) {
void *addr;
- page = find_get_page(inode->i_mapping, index);
-
+ page = pages[index];
addr = kmap(page);
memset(addr, 0, PAGE_CACHE_SIZE);
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
bytes += PAGE_CACHE_SIZE;
index++;
}
- btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
-
/* Write the checksums and trans id to the first page */
{
void *addr;
u64 *gen;
- page = find_get_page(inode->i_mapping, 0);
+ page = pages[0];
addr = kmap(page);
- memcpy(addr, checksums, sizeof(u32) * num_checksums);
- gen = addr + (sizeof(u32) * num_checksums);
+ memcpy(addr, checksums, sizeof(u32) * num_pages);
+ gen = addr + (sizeof(u32) * num_pages);
*gen = trans->transid;
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
}
- BTRFS_I(inode)->generation = trans->transid;
+ ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
+ bytes, &cached_state);
+ btrfs_drop_pages(pages, num_pages);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ if (ret) {
+ ret = 0;
+ goto out_free;
+ }
+
+ BTRFS_I(inode)->generation = trans->transid;
+
filemap_write_and_wait(inode->i_mapping);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -775,6 +845,7 @@ out_free:
BTRFS_I(inode)->generation = 0;
}
kfree(checksums);
+ kfree(pages);
btrfs_update_inode(trans, root, inode);
iput(inode);
return ret;
@@ -1187,7 +1258,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group,
{
unlink_free_space(block_group, bitmap_info);
kfree(bitmap_info->bitmap);
- kfree(bitmap_info);
+ kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
@@ -1285,9 +1356,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
* If we are below the extents threshold then we can add this as an
* extent, and don't have to deal with the bitmap
*/
- if (block_group->free_extents < block_group->extents_thresh &&
- info->bytes > block_group->sectorsize * 4)
- return 0;
+ if (block_group->free_extents < block_group->extents_thresh) {
+ /*
+ * If this block group has some small extents we don't want to
+ * use up all of our free slots in the cache with them, we want
+		 * to reserve them for larger extents, however if we have plenty
+		 * of cache left then go ahead and add them, no sense in adding
+ * the overhead of a bitmap if we don't have to.
+ */
+ if (info->bytes <= block_group->sectorsize * 4) {
+ if (block_group->free_extents * 2 <=
+ block_group->extents_thresh)
+ return 0;
+ } else {
+ return 0;
+ }
+ }
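
Restated as a predicate, the heuristic above says: small extents (at most four sectors) go into a bitmap only once the extent cache is more than half full, while larger extents stay out of bitmaps whenever slots remain. The function and parameter names are ours; the arithmetic mirrors the hunk.

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the free-space entry should be stored in a bitmap
 * rather than as an individual extent record. */
static bool use_bitmap(uint64_t bytes, uint64_t sectorsize,
                       unsigned int free_extents,
                       unsigned int extents_thresh)
{
        if (free_extents < extents_thresh) {
                if (bytes <= sectorsize * 4)
                        return free_extents * 2 > extents_thresh;
                return false;
        }
        return true;
}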
/*
* some block groups are so tiny they can't be enveloped by a bitmap, so
@@ -1342,8 +1426,8 @@ new_bitmap:
/* no pre-allocated info, allocate a new one */
if (!info) {
- info = kzalloc(sizeof(struct btrfs_free_space),
- GFP_NOFS);
+ info = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
if (!info) {
spin_lock(&block_group->tree_lock);
ret = -ENOMEM;
@@ -1365,7 +1449,7 @@ out:
if (info) {
if (info->bitmap)
kfree(info->bitmap);
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
}
return ret;
@@ -1398,7 +1482,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
else
__unlink_free_space(block_group, right_info);
info->bytes += right_info->bytes;
- kfree(right_info);
+ kmem_cache_free(btrfs_free_space_cachep, right_info);
merged = true;
}
@@ -1410,7 +1494,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
__unlink_free_space(block_group, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
- kfree(left_info);
+ kmem_cache_free(btrfs_free_space_cachep, left_info);
merged = true;
}
@@ -1423,7 +1507,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info;
int ret = 0;
- info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+ info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
if (!info)
return -ENOMEM;
@@ -1450,7 +1534,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
link:
ret = link_free_space(block_group, info);
if (ret)
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
out:
spin_unlock(&block_group->tree_lock);
@@ -1520,7 +1604,7 @@ again:
kfree(info->bitmap);
block_group->total_bitmaps--;
}
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
goto out_lock;
}
@@ -1556,7 +1640,7 @@ again:
/* the hole we're creating ends at the end
* of the info struct, just free the info
*/
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
}
spin_unlock(&block_group->tree_lock);
@@ -1629,30 +1713,28 @@ __btrfs_return_cluster_to_free_space(
{
struct btrfs_free_space *entry;
struct rb_node *node;
- bool bitmap;
spin_lock(&cluster->lock);
if (cluster->block_group != block_group)
goto out;
- bitmap = cluster->points_to_bitmap;
cluster->block_group = NULL;
cluster->window_start = 0;
list_del_init(&cluster->block_group_list);
- cluster->points_to_bitmap = false;
-
- if (bitmap)
- goto out;
node = rb_first(&cluster->root);
while (node) {
+ bool bitmap;
+
entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
- BUG_ON(entry->bitmap);
- try_merge_free_space(block_group, entry, false);
+
+ bitmap = (entry->bitmap != NULL);
+ if (!bitmap)
+ try_merge_free_space(block_group, entry, false);
tree_insert_offset(&block_group->free_space_offset,
- entry->offset, &entry->offset_index, 0);
+ entry->offset, &entry->offset_index, bitmap);
}
cluster->root = RB_ROOT;
@@ -1689,7 +1771,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
unlink_free_space(block_group, info);
if (info->bitmap)
kfree(info->bitmap);
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
if (need_resched()) {
spin_unlock(&block_group->tree_lock);
cond_resched();
@@ -1722,7 +1804,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
entry->offset += bytes;
entry->bytes -= bytes;
if (!entry->bytes)
- kfree(entry);
+ kmem_cache_free(btrfs_free_space_cachep, entry);
else
link_free_space(block_group, entry);
}
@@ -1775,50 +1857,24 @@ int btrfs_return_cluster_to_free_space(
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
+ struct btrfs_free_space *entry,
u64 bytes, u64 min_start)
{
- struct btrfs_free_space *entry;
int err;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
u64 ret = 0;
- spin_lock(&block_group->tree_lock);
- spin_lock(&cluster->lock);
-
- if (!cluster->points_to_bitmap)
- goto out;
-
- if (cluster->block_group != block_group)
- goto out;
-
- /*
- * search_start is the beginning of the bitmap, but at some point it may
- * be a good idea to point to the actual start of the free area in the
- * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
- * to 1 to make sure we get the bitmap entry
- */
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, search_start),
- 1, 0);
- if (!entry || !entry->bitmap)
- goto out;
-
search_start = min_start;
search_bytes = bytes;
err = search_bitmap(block_group, entry, &search_start,
&search_bytes);
if (err)
- goto out;
+ return 0;
ret = search_start;
bitmap_clear_bits(block_group, entry, ret, bytes);
- if (entry->bytes == 0)
- free_bitmap(block_group, entry);
-out:
- spin_unlock(&cluster->lock);
- spin_unlock(&block_group->tree_lock);
return ret;
}
@@ -1836,10 +1892,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct rb_node *node;
u64 ret = 0;
- if (cluster->points_to_bitmap)
- return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
- min_start);
-
spin_lock(&cluster->lock);
if (bytes > cluster->max_size)
goto out;
@@ -1852,9 +1904,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
goto out;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
-
while(1) {
- if (entry->bytes < bytes || entry->offset < min_start) {
+ if (entry->bytes < bytes ||
+ (!entry->bitmap && entry->offset < min_start)) {
struct rb_node *node;
node = rb_next(&entry->offset_index);
@@ -1864,10 +1916,27 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
offset_index);
continue;
}
- ret = entry->offset;
- entry->offset += bytes;
- entry->bytes -= bytes;
+ if (entry->bitmap) {
+ ret = btrfs_alloc_from_bitmap(block_group,
+ cluster, entry, bytes,
+ min_start);
+ if (ret == 0) {
+ struct rb_node *node;
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ break;
+ entry = rb_entry(node, struct btrfs_free_space,
+ offset_index);
+ continue;
+ }
+ } else {
+
+ ret = entry->offset;
+
+ entry->offset += bytes;
+ entry->bytes -= bytes;
+ }
if (entry->bytes == 0)
rb_erase(&entry->offset_index, &cluster->root);
@@ -1884,7 +1953,12 @@ out:
block_group->free_space -= bytes;
if (entry->bytes == 0) {
block_group->free_extents--;
- kfree(entry);
+ if (entry->bitmap) {
+ kfree(entry->bitmap);
+ block_group->total_bitmaps--;
+ recalculate_thresholds(block_group);
+ }
+ kmem_cache_free(btrfs_free_space_cachep, entry);
}
spin_unlock(&block_group->tree_lock);
@@ -1904,12 +1978,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
unsigned long found_bits;
unsigned long start = 0;
unsigned long total_found = 0;
+ int ret;
bool found = false;
i = offset_to_bit(entry->offset, block_group->sectorsize,
max_t(u64, offset, entry->offset));
- search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
- total_bits = bytes_to_bits(bytes, block_group->sectorsize);
+ search_bits = bytes_to_bits(bytes, block_group->sectorsize);
+ total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
again:
found_bits = 0;
@@ -1926,7 +2001,7 @@ again:
}
if (!found_bits)
- return -1;
+ return -ENOSPC;
if (!found) {
start = i;
@@ -1950,189 +2025,208 @@ again:
cluster->window_start = start * block_group->sectorsize +
entry->offset;
- cluster->points_to_bitmap = true;
+ rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ ret = tree_insert_offset(&cluster->root, entry->offset,
+ &entry->offset_index, 1);
+ BUG_ON(ret);
return 0;
}
/*
- * here we try to find a cluster of blocks in a block group. The goal
- * is to find at least bytes free and up to empty_size + bytes free.
- * We might not find them all in one contiguous area.
- *
- * returns zero and sets up cluster if things worked out, otherwise
- * it returns -enospc
+ * This searches the block group for just extents to fill the cluster with.
*/
-int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 empty_size)
+static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space *first = NULL;
struct btrfs_free_space *entry = NULL;
+ struct btrfs_free_space *prev = NULL;
+ struct btrfs_free_space *last;
struct rb_node *node;
- struct btrfs_free_space *next;
- struct btrfs_free_space *last = NULL;
- u64 min_bytes;
u64 window_start;
u64 window_free;
- u64 max_extent = 0;
- bool found_bitmap = false;
- int ret;
+ u64 max_extent;
+ u64 max_gap = 128 * 1024;
- /* for metadata, allow allocates with more holes */
- if (btrfs_test_opt(root, SSD_SPREAD)) {
- min_bytes = bytes + empty_size;
- } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
- /*
- * we want to do larger allocations when we are
- * flushing out the delayed refs, it helps prevent
- * making more work as we go along.
- */
- if (trans->transaction->delayed_refs.flushing)
- min_bytes = max(bytes, (bytes + empty_size) >> 1);
- else
- min_bytes = max(bytes, (bytes + empty_size) >> 4);
- } else
- min_bytes = max(bytes, (bytes + empty_size) >> 2);
-
- spin_lock(&block_group->tree_lock);
- spin_lock(&cluster->lock);
-
- /* someone already found a cluster, hooray */
- if (cluster->block_group) {
- ret = 0;
- goto out;
- }
-again:
- entry = tree_search_offset(block_group, offset, found_bitmap, 1);
- if (!entry) {
- ret = -ENOSPC;
- goto out;
- }
+ entry = tree_search_offset(block_group, offset, 0, 1);
+ if (!entry)
+ return -ENOSPC;
/*
- * If found_bitmap is true, we exhausted our search for extent entries,
- * and we just want to search all of the bitmaps that we can find, and
- * ignore any extent entries we find.
+ * We don't want bitmaps, so just move along until we find a normal
+ * extent entry.
*/
- while (entry->bitmap || found_bitmap ||
- (!entry->bitmap && entry->bytes < min_bytes)) {
- struct rb_node *node = rb_next(&entry->offset_index);
-
- if (entry->bitmap && entry->bytes > bytes + empty_size) {
- ret = btrfs_bitmap_cluster(block_group, entry, cluster,
- offset, bytes + empty_size,
- min_bytes);
- if (!ret)
- goto got_it;
- }
-
- if (!node) {
- ret = -ENOSPC;
- goto out;
- }
+ while (entry->bitmap) {
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
}
- /*
- * We already searched all the extent entries from the passed in offset
- * to the end and didn't find enough space for the cluster, and we also
- * didn't find any bitmaps that met our criteria, just go ahead and exit
- */
- if (found_bitmap) {
- ret = -ENOSPC;
- goto out;
- }
-
- cluster->points_to_bitmap = false;
window_start = entry->offset;
window_free = entry->bytes;
- last = entry;
max_extent = entry->bytes;
+ first = entry;
+ last = entry;
+ prev = entry;
- while (1) {
- /* out window is just right, lets fill it */
- if (window_free >= bytes + empty_size)
- break;
-
- node = rb_next(&last->offset_index);
- if (!node) {
- if (found_bitmap)
- goto again;
- ret = -ENOSPC;
- goto out;
- }
- next = rb_entry(node, struct btrfs_free_space, offset_index);
+ while (window_free <= min_bytes) {
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
- /*
- * we found a bitmap, so if this search doesn't result in a
- * cluster, we know to go and search again for the bitmaps and
- * start looking for space there
- */
- if (next->bitmap) {
- if (!found_bitmap)
- offset = next->offset;
- found_bitmap = true;
- last = next;
+ if (entry->bitmap)
continue;
- }
-
/*
* we haven't filled the empty size and the window is
* very large. reset and try again
*/
- if (next->offset - (last->offset + last->bytes) > 128 * 1024 ||
- next->offset - window_start > (bytes + empty_size) * 2) {
- entry = next;
+ if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
+ entry->offset - window_start > (min_bytes * 2)) {
+ first = entry;
window_start = entry->offset;
window_free = entry->bytes;
last = entry;
max_extent = entry->bytes;
} else {
- last = next;
- window_free += next->bytes;
+ last = entry;
+ window_free += entry->bytes;
if (entry->bytes > max_extent)
max_extent = entry->bytes;
}
+ prev = entry;
}
- cluster->window_start = entry->offset;
+ cluster->window_start = first->offset;
+
+ node = &first->offset_index;
/*
* now we've found our entries, pull them out of the free space
* cache and put them into the cluster rbtree
- *
- * The cluster includes an rbtree, but only uses the offset index
- * of each free space cache entry.
*/
- while (1) {
+ do {
+ int ret;
+
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
- if (entry->bitmap && node) {
- entry = rb_entry(node, struct btrfs_free_space,
- offset_index);
+ if (entry->bitmap)
continue;
- } else if (entry->bitmap && !node) {
- break;
- }
rb_erase(&entry->offset_index, &block_group->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
BUG_ON(ret);
+ } while (node && entry != last);
- if (!node || entry == last)
- break;
+ cluster->max_size = max_extent;
+ return 0;
+}
+
+/*
+ * This specifically looks for bitmaps that may work in the cluster; we assume
+ * that we have already failed to find extents that will work.
+ */
+static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 min_bytes)
+{
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ int ret = -ENOSPC;
+
+ if (block_group->total_bitmaps == 0)
+ return -ENOSPC;
+
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, offset),
+ 0, 1);
+ if (!entry)
+ return -ENOSPC;
+
+ node = &entry->offset_index;
+ do {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ node = rb_next(&entry->offset_index);
+ if (!entry->bitmap)
+ continue;
+ if (entry->bytes < min_bytes)
+ continue;
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+ bytes, min_bytes);
+ } while (ret && node);
+
+ return ret;
+}
+
+/*
+ * Here we try to find a cluster of blocks in a block group. The goal
+ * is to find at least bytes free and up to empty_size + bytes free.
+ * We might not find them all in one contiguous area.
+ *
+ * Returns zero and sets up cluster if things worked out, otherwise
+ * it returns -ENOSPC.
+ */
+int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 empty_size)
+{
+ u64 min_bytes;
+ int ret;
+
+	/* for metadata, allow allocations with more holes */
+ if (btrfs_test_opt(root, SSD_SPREAD)) {
+ min_bytes = bytes + empty_size;
+ } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
+ /*
+ * we want to do larger allocations when we are
+ * flushing out the delayed refs, it helps prevent
+ * making more work as we go along.
+ */
+ if (trans->transaction->delayed_refs.flushing)
+ min_bytes = max(bytes, (bytes + empty_size) >> 1);
+ else
+ min_bytes = max(bytes, (bytes + empty_size) >> 4);
+ } else
+ min_bytes = max(bytes, (bytes + empty_size) >> 2);
+
+ spin_lock(&block_group->tree_lock);
+
+ /*
+	 * If we know we don't have enough space to make a cluster, don't even
+ * bother doing all the work to try and find one.
+ */
+ if (block_group->free_space < min_bytes) {
+ spin_unlock(&block_group->tree_lock);
+ return -ENOSPC;
}
- cluster->max_size = max_extent;
-got_it:
- ret = 0;
- atomic_inc(&block_group->count);
- list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
- cluster->block_group = block_group;
+ spin_lock(&cluster->lock);
+
+ /* someone already found a cluster, hooray */
+ if (cluster->block_group) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
+ min_bytes);
+ if (ret)
+ ret = setup_cluster_bitmap(block_group, cluster, offset,
+ bytes, min_bytes);
+
+ if (!ret) {
+ atomic_inc(&block_group->count);
+ list_add_tail(&cluster->block_group_list,
+ &block_group->cluster_list);
+ cluster->block_group = block_group;
+ }
out:
spin_unlock(&cluster->lock);
spin_unlock(&block_group->tree_lock);
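
For a concrete feel of the min_bytes heuristics above, here is a hedged,
standalone sketch mirroring the selection logic in btrfs_find_space_cluster();
the sample numbers are illustrative, not from the patch:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long long bytes = 4096, empty_size = 65536;
	unsigned long long total = bytes + empty_size;	/* 69632 */

	printf("ssd_spread:         %llu\n", total);			/* 69632 */
	printf("metadata, flushing: %llu\n", max(bytes, total >> 1));	/* 34816 */
	printf("metadata:           %llu\n", max(bytes, total >> 4));	/*  4352 */
	printf("data:               %llu\n", max(bytes, total >> 2));	/* 17408 */
	return 0;
}
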
@@ -2149,8 +2243,99 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
spin_lock_init(&cluster->refill_lock);
cluster->root = RB_ROOT;
cluster->max_size = 0;
- cluster->points_to_bitmap = false;
INIT_LIST_HEAD(&cluster->block_group_list);
cluster->block_group = NULL;
}
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+ u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+ struct btrfs_free_space *entry = NULL;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ u64 bytes = 0;
+ u64 actually_trimmed;
+ int ret = 0;
+
+ *trimmed = 0;
+
+ while (start < end) {
+ spin_lock(&block_group->tree_lock);
+
+ if (block_group->free_space < minlen) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+
+ entry = tree_search_offset(block_group, start, 0, 1);
+ if (!entry)
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group,
+ start),
+ 1, 1);
+
+ if (!entry || entry->offset >= end) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+
+ if (entry->bitmap) {
+ ret = search_bitmap(block_group, entry, &start, &bytes);
+ if (!ret) {
+ if (start >= end) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+ bytes = min(bytes, end - start);
+ bitmap_clear_bits(block_group, entry,
+ start, bytes);
+ if (entry->bytes == 0)
+ free_bitmap(block_group, entry);
+ } else {
+ start = entry->offset + BITS_PER_BITMAP *
+ block_group->sectorsize;
+ spin_unlock(&block_group->tree_lock);
+ ret = 0;
+ continue;
+ }
+ } else {
+ start = entry->offset;
+ bytes = min(entry->bytes, end - start);
+ unlink_free_space(block_group, entry);
+			kmem_cache_free(btrfs_free_space_cachep, entry);
+ }
+
+ spin_unlock(&block_group->tree_lock);
+
+ if (bytes >= minlen) {
+ int update_ret;
+ update_ret = btrfs_update_reserved_bytes(block_group,
+ bytes, 1, 1);
+
+ ret = btrfs_error_discard_extent(fs_info->extent_root,
+ start,
+ bytes,
+ &actually_trimmed);
+
+ btrfs_add_free_space(block_group,
+ start, bytes);
+ if (!update_ret)
+ btrfs_update_reserved_bytes(block_group,
+ bytes, 0, 1);
+
+ if (ret)
+ break;
+ *trimmed += actually_trimmed;
+ }
+ start += bytes;
+ bytes = 0;
+
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return ret;
+}
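
btrfs_trim_fs(), which the FITRIM ioctl below ends up calling, is not part of
this hunk. As a rough sketch under that assumption, the filesystem-level loop
presumably walks the block groups overlapping the requested range and delegates
to btrfs_trim_block_group(); next_block_group() is an assumed helper and
block-group reference counting is elided:

static int example_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_block_group_cache *cache;
	u64 end = range->start + range->len;
	u64 trimmed = 0, group_trimmed;
	int ret = 0;

	cache = btrfs_lookup_first_block_group(root->fs_info, range->start);
	while (cache && cache->key.objectid < end) {
		ret = btrfs_trim_block_group(cache, &group_trimmed,
				max(range->start, cache->key.objectid),
				min(end, cache->key.objectid +
				    cache->key.offset),
				range->minlen);
		trimmed += group_trimmed;
		if (ret)
			break;
		cache = next_block_group(root, cache);	/* assumed helper */
	}
	range->len = trimmed;
	return ret;
}
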
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e49ca5c..65c3b93 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -68,4 +68,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
int btrfs_return_cluster_to_free_space(
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster);
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+ u64 *trimmed, u64 start, u64 end, u64 minlen);
#endif
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c56eb59..c05a08f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -30,7 +30,8 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
int slot;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
search_key.type = -1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 512c3d1..fcd66b6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -50,6 +50,7 @@
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
+#include "free-space-cache.h"
struct btrfs_iget_args {
u64 ino;
@@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
+struct kmem_cache *btrfs_free_space_cachep;
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -82,7 +84,8 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
-static void btrfs_truncate(struct inode *inode);
+static int btrfs_setsize(struct inode *inode, loff_t newsize);
+static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
@@ -109,6 +112,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
+ int compress_type,
struct page **compressed_pages)
{
struct btrfs_key key;
@@ -123,12 +127,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
size_t cur_size = size;
size_t datasize;
unsigned long offset;
- int compress_type = BTRFS_COMPRESS_NONE;
- if (compressed_size && compressed_pages) {
- compress_type = root->fs_info->compress_type;
+ if (compressed_size && compressed_pages)
cur_size = compressed_size;
- }
path = btrfs_alloc_path();
if (!path)
@@ -218,7 +219,7 @@ fail:
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end,
- size_t compressed_size,
+ size_t compressed_size, int compress_type,
struct page **compressed_pages)
{
u64 isize = i_size_read(inode);
@@ -251,7 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
- compressed_pages);
+ compress_type, compressed_pages);
BUG_ON(ret);
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
@@ -288,6 +289,7 @@ static noinline int add_async_extent(struct async_cow *cow,
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
+ BUG_ON(!async_extent);
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
@@ -382,9 +384,11 @@ again:
*/
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
(btrfs_test_opt(root, COMPRESS) ||
- (BTRFS_I(inode)->force_compress))) {
+ (BTRFS_I(inode)->force_compress) ||
+ (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ BUG_ON(!pages);
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
@@ -427,12 +431,13 @@ again:
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(trans, root, inode,
- start, end, 0, NULL);
+ start, end, 0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end,
- total_compressed, pages);
+ total_compressed,
+ compress_type, pages);
}
if (ret == 0) {
/*
@@ -786,7 +791,7 @@ static noinline int cow_file_range(struct inode *inode,
if (start == 0) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(trans, root, inode,
- start, end, 0, NULL);
+ start, end, 0, 0, NULL);
if (ret == 0) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
@@ -1254,7 +1259,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
else if (!btrfs_test_opt(root, COMPRESS) &&
- !(BTRFS_I(inode)->force_compress))
+ !(BTRFS_I(inode)->force_compress) &&
+ !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
else
@@ -1461,8 +1467,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags);
- } else if (!skip_sum)
- btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ } else if (!skip_sum) {
+ ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ if (ret)
+ return ret;
+ }
goto mapit;
} else if (!skip_sum) {
/* csum items have already been cloned */
@@ -1761,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (!ret) {
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ }
+ ret = 0;
out:
if (nolock) {
if (trans)
@@ -1785,6 +1797,8 @@ out:
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
+ trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
+
ClearPagePrivate2(page);
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
@@ -1895,10 +1909,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
else
rw = READ;
- BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
+ ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
failrec->last_mirror,
failrec->bio_flags, 0);
- return 0;
+ return ret;
}
/*
@@ -2210,8 +2224,6 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
insert = 1;
#endif
insert = 1;
- } else {
- WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
}
if (!BTRFS_I(inode)->orphan_meta_reserved) {
@@ -2282,7 +2294,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
-void btrfs_orphan_cleanup(struct btrfs_root *root)
+int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -2292,10 +2304,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
int ret = 0, nr_unlink = 0, nr_truncate = 0;
if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
- return;
+ return 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
@@ -2304,18 +2319,16 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0) {
- printk(KERN_ERR "Error searching slot for orphan: %d"
- "\n", ret);
- break;
- }
+ if (ret < 0)
+ goto out;
/*
* if ret == 0 means we found what we were searching for, which
- * is weird, but possible, so only screw with path if we didnt
+ * is weird, but possible, so only screw with path if we didn't
* find the key and see if we have stuff that matches
*/
if (ret > 0) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -2343,7 +2356,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
- BUG_ON(IS_ERR(inode));
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
/*
* add this inode to the orphan list so btrfs_orphan_del does
@@ -2361,7 +2377,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (is_bad_inode(inode)) {
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
btrfs_orphan_del(trans, inode);
btrfs_end_transaction(trans, root);
iput(inode);
@@ -2370,17 +2389,22 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
/* if we have links, this was a truncate, lets do that */
if (inode->i_nlink) {
+ if (!S_ISREG(inode->i_mode)) {
+ WARN_ON(1);
+ iput(inode);
+ continue;
+ }
nr_truncate++;
- btrfs_truncate(inode);
+ ret = btrfs_truncate(inode);
} else {
nr_unlink++;
}
/* this will do delete_inode and everything for us */
iput(inode);
+ if (ret)
+ goto out;
}
- btrfs_free_path(path);
-
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
@@ -2389,14 +2413,20 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
if (root->orphan_block_rsv || root->orphan_item_inserted) {
trans = btrfs_join_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
- btrfs_end_transaction(trans, root);
+ if (!IS_ERR(trans))
+ btrfs_end_transaction(trans, root);
}
if (nr_unlink)
printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
if (nr_truncate)
printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
+
+out:
+ if (ret)
+ printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
+ btrfs_free_path(path);
+ return ret;
}
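
btrfs_orphan_cleanup() now returns an int, so every caller has to check it;
the pattern repeated in the btrfs_lookup_dentry(), create_snapshot() and
btrfs_recover_relocation() hunks below is simply:

	ret = btrfs_orphan_cleanup(sub_root);
	if (ret)
		inode = ERR_PTR(ret);	/* or propagate ret directly, per caller */
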
/*
@@ -2563,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
+ if (!leaf->map_token)
+ map_private_extent_buffer(leaf, (unsigned long)item,
+ sizeof(struct btrfs_inode_item),
+ &leaf->map_token, &leaf->kaddr,
+ &leaf->map_start, &leaf->map_len,
+ KM_USER1);
+
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2591,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+ if (leaf->map_token) {
+ unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+ leaf->map_token = NULL;
+ }
}
/*
@@ -2635,10 +2677,10 @@ failed:
* recovery code. It remove a link in a directory with a given name, and
* also drops the back refs in the inode to the directory
*/
-int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *dir, struct inode *inode,
- const char *name, int name_len)
+static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, struct inode *inode,
+ const char *name, int name_len)
{
struct btrfs_path *path;
int ret = 0;
@@ -2710,12 +2752,25 @@ err:
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
btrfs_update_inode(trans, root, dir);
- btrfs_drop_nlink(inode);
- ret = btrfs_update_inode(trans, root, inode);
out:
return ret;
}
+int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, struct inode *inode,
+ const char *name, int name_len)
+{
+ int ret;
+ ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
+ if (!ret) {
+ btrfs_drop_nlink(inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ }
+ return ret;
+}
+
+
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
struct btrfs_path *path)
@@ -3537,7 +3592,13 @@ out:
return ret;
}
-int btrfs_cont_expand(struct inode *inode, loff_t size)
+/*
+ * This function puts in dummy file extents for the area we're creating a hole
+ * for. So if we are truncating this file to a larger size, we need to insert
+ * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
+ * for the range between oldsize and size.
+ */
+int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
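
From userspace, the hole that btrfs_cont_expand() fills with dummy extents is
the range an extending truncate exposes. A minimal sketch; the path is
illustrative and error handling is elided:

#include <fcntl.h>
#include <unistd.h>

int extend_and_check(const char *path, off_t newsize)
{
	char buf[4096];
	int fd = open(path, O_RDWR);
	off_t oldsize = lseek(fd, 0, SEEK_END);

	ftruncate(fd, newsize);
	/* reads in [oldsize, newsize) must see zeros, which is what the
	 * dummy hole extents guarantee */
	pread(fd, buf, sizeof(buf), oldsize);
	close(fd);
	return 0;
}
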
@@ -3545,7 +3606,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 mask = root->sectorsize - 1;
- u64 hole_start = (inode->i_size + mask) & ~mask;
+ u64 hole_start = (oldsize + mask) & ~mask;
u64 block_end = (size + mask) & ~mask;
u64 last_byte;
u64 cur_offset;
@@ -3590,13 +3651,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
err = btrfs_drop_extents(trans, inode, cur_offset,
cur_offset + hole_size,
&hint_byte, 1);
- BUG_ON(err);
+ if (err)
+ break;
err = btrfs_insert_file_extent(trans, root,
inode->i_ino, cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
- BUG_ON(err);
+ if (err)
+ break;
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
@@ -3616,81 +3679,41 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
return err;
}
-static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
+static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
- unsigned long nr;
+ loff_t oldsize = i_size_read(inode);
int ret;
- if (attr->ia_size == inode->i_size)
+ if (newsize == oldsize)
return 0;
- if (attr->ia_size > inode->i_size) {
- unsigned long limit;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (attr->ia_size > inode->i_sb->s_maxbytes)
- return -EFBIG;
- if (limit != RLIM_INFINITY && attr->ia_size > limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- }
-
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- btrfs_set_trans_block_group(trans, inode);
-
- ret = btrfs_orphan_add(trans, inode);
- BUG_ON(ret);
-
- nr = trans->blocks_used;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
-
- if (attr->ia_size > inode->i_size) {
- ret = btrfs_cont_expand(inode, attr->ia_size);
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ truncate_pagecache(inode, oldsize, newsize);
+ ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret) {
- btrfs_truncate(inode);
+ btrfs_setsize(inode, oldsize);
return ret;
}
- i_size_write(inode, attr->ia_size);
- btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ mark_inode_dirty(inode);
+ } else {
- trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
- trans->block_rsv = root->orphan_block_rsv;
- BUG_ON(!trans->block_rsv);
+ /*
+ * We're truncating a file that used to have good data down to
+ * zero. Make sure it gets into the ordered flush list so that
+ * any new writes get down to disk quickly.
+ */
+ if (newsize == 0)
+ BTRFS_I(inode)->ordered_data_close = 1;
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
- if (inode->i_nlink > 0) {
- ret = btrfs_orphan_del(trans, inode);
- BUG_ON(ret);
- }
- nr = trans->blocks_used;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
- return 0;
+ /* we don't support swapfiles, so vmtruncate shouldn't fail */
+ truncate_setsize(inode, newsize);
+ ret = btrfs_truncate(inode);
}
- /*
- * We're truncating a file that used to have good data down to
- * zero. Make sure it gets into the ordered flush list so that
- * any new writes get down to disk quickly.
- */
- if (attr->ia_size == 0)
- BTRFS_I(inode)->ordered_data_close = 1;
-
- /* we don't support swapfiles, so vmtruncate shouldn't fail */
- ret = vmtruncate(inode, attr->ia_size);
- BUG_ON(ret);
-
- return 0;
+ return ret;
}
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
@@ -3707,7 +3730,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- err = btrfs_setattr_size(inode, attr);
+ err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
@@ -3730,6 +3753,8 @@ void btrfs_evict_inode(struct inode *inode)
unsigned long nr;
int ret;
+ trace_btrfs_inode_evict(inode);
+
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
root == root->fs_info->tree_root))
@@ -4072,7 +4097,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
-
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
@@ -4147,8 +4171,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
- btrfs_orphan_cleanup(sub_root);
+ ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
+ if (ret)
+ inode = ERR_PTR(ret);
}
return inode;
@@ -4196,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
struct btrfs_key found_key;
struct btrfs_path *path;
int ret;
- u32 nritems;
struct extent_buffer *leaf;
int slot;
- int advance;
unsigned char d_type;
int over = 0;
u32 di_cur;
@@ -4242,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
- if (advance || slot >= nritems) {
- if (slot >= nritems - 1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- slot++;
- path->slots[0]++;
- }
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
@@ -4271,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
if (btrfs_key_type(&found_key) != key_type)
break;
if (found_key.offset < filp->f_pos)
- continue;
+ goto next;
filp->f_pos = found_key.offset;
@@ -4282,6 +4298,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
while (di_cur < di_total) {
struct btrfs_key location;
+ if (verify_dir_item(root, leaf, di))
+ break;
+
name_len = btrfs_dir_name_len(leaf, di);
if (name_len <= sizeof(tmp_name)) {
name_ptr = tmp_name;
@@ -4321,6 +4340,8 @@ skip:
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
+next:
+ path->slots[0]++;
}
/* Reached end of directory/root. Bump pos past the last item. */
@@ -4513,12 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BUG_ON(!path);
inode = new_inode(root->fs_info->sb);
- if (!inode)
+ if (!inode) {
+ btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
+ }
if (dir) {
+ trace_btrfs_inode_request(dir);
+
ret = btrfs_set_inode_index(dir, index);
if (ret) {
+ btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
@@ -4585,12 +4611,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if ((mode & S_IFREG)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(root, NODATACOW) ||
+ (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
insert_inode_hash(inode);
inode_tree_add(inode);
+
+ trace_btrfs_inode_new(inode);
+
return inode;
fail:
if (dir)
@@ -4809,10 +4839,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
/* do not allow sys_link's with other subvols of the same device */
if (root->objectid != BTRFS_I(inode)->root->objectid)
- return -EPERM;
+ return -EXDEV;
- btrfs_inc_nlink(inode);
- inode->i_ctime = CURRENT_TIME;
+ if (inode->i_nlink == ~0U)
+ return -EMLINK;
err = btrfs_set_inode_index(dir, &index);
if (err)
@@ -4829,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
goto fail;
}
+ btrfs_inc_nlink(inode);
+ inode->i_ctime = CURRENT_TIME;
+
btrfs_set_trans_block_group(trans, dir);
ihold(inode);
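
The -EPERM to -EXDEV change above is user-visible: link(2) across subvolumes
now fails the same way it does across mounts, and an exhausted link count
reports EMLINK. A hedged userspace sketch; /mnt/subvol-a and /mnt/subvol-b are
assumed to be different subvolumes of the same btrfs filesystem:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (link("/mnt/subvol-a/file", "/mnt/subvol-b/link") < 0) {
		if (errno == EXDEV)
			printf("cross-subvolume link refused (was EPERM)\n");
		else if (errno == EMLINK)
			printf("link count exhausted\n");
	}
	return 0;
}
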
@@ -5198,7 +5231,7 @@ again:
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
- extent_map_end(em) - 1, GFP_NOFS);
+ extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5265,6 +5298,9 @@ insert:
}
write_unlock(&em_tree->lock);
out:
+
+ trace_btrfs_get_extent(root, em);
+
if (path)
btrfs_free_path(path);
if (trans) {
@@ -5402,17 +5438,30 @@ out:
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+ struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
- struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
+ bool insert = false;
- btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ /*
+	 * OK, if the extent map we looked up is a hole and covers the exact
+	 * range we want, there is no reason to allocate a new one. However,
+	 * if it is not right, we need to free this one and drop the cache for
+	 * our range.
+ */
+ if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
+ em->len != len) {
+ free_extent_map(em);
+ em = NULL;
+ insert = true;
+ btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ }
trans = btrfs_join_transaction(root, 0);
if (IS_ERR(trans))
@@ -5428,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
goto out;
}
- em = alloc_extent_map(GFP_NOFS);
if (!em) {
- em = ERR_PTR(-ENOMEM);
- goto out;
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em) {
+ em = ERR_PTR(-ENOMEM);
+ goto out;
+ }
}
em->start = start;
@@ -5441,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+
+ /*
+ * We need to do this because if we're using the original em we searched
+ * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
+ */
+ em->flags = 0;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- while (1) {
+ while (insert) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
@@ -5661,8 +5718,7 @@ must_cow:
* it above
*/
len = bh_result->b_size;
- free_extent_map(em);
- em = btrfs_new_extent_direct(inode, start, len);
+ em = btrfs_new_extent_direct(inode, em, start, len);
if (IS_ERR(em))
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
@@ -5748,6 +5804,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
kfree(dip->csums);
kfree(dip);
+
+ /* If we had a csum failure make sure to clear the uptodate flag */
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
@@ -5821,8 +5881,10 @@ again:
}
add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
- btrfs_ordered_update_i_size(inode, 0, ordered);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+ if (!ret)
+ btrfs_update_inode(trans, root, inode);
+ ret = 0;
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1,
@@ -5849,6 +5911,10 @@ out_done:
kfree(dip->csums);
kfree(dip);
+
+ /* If we had an error make sure to clear the uptodate flag */
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
@@ -5904,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
- u32 *csums)
+ u32 *csums, int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5915,18 +5981,33 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
if (ret)
goto err;
- if (write && !skip_sum) {
+ if (skip_sum)
+ goto map;
+
+ if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
- } else if (!skip_sum)
- btrfs_lookup_bio_sums_dio(root, inode, bio,
+ } else if (write) {
+ /*
+ * If we aren't doing async submit, calculate the csum of the
+ * bio now.
+ */
+ ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+ if (ret)
+ goto err;
+ } else if (!skip_sum) {
+ ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
+ if (ret)
+ goto err;
+ }
- ret = btrfs_map_bio(root, rw, bio, 0, 1);
+map:
+ ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
@@ -5948,13 +6029,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
-
- bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
- if (!bio)
- return -ENOMEM;
- bio->bi_private = dip;
- bio->bi_end_io = btrfs_end_dio_bio;
- atomic_inc(&dip->pending_bios);
+ int async_submit = 0;
+ int write = rw & REQ_WRITE;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -5964,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
return -EIO;
}
+ if (map_length >= orig_bio->bi_size) {
+ bio = orig_bio;
+ goto submit;
+ }
+
+ async_submit = 1;
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+ if (!bio)
+ return -ENOMEM;
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
+ atomic_inc(&dip->pending_bios);
+
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (unlikely(map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -5977,14 +6066,15 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
- if (!skip_sum)
+		/* Writes use the ordered csums */
+ if (!write && !skip_sum)
csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
@@ -6013,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
}
}
+submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (!ret)
return 0;
@@ -6052,7 +6143,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
}
dip->csums = NULL;
- if (!skip_sum) {
+	/* Writes use the ordered csum stuff, so we don't need dip->csums */
+ if (!write && !skip_sum) {
dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
if (!dip->csums) {
kfree(dip);
@@ -6108,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
unsigned long nr_segs)
{
int seg;
+ int i;
size_t size;
unsigned long addr;
unsigned blocksize_mask = root->sectorsize - 1;
@@ -6122,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
- if ((addr & blocksize_mask) || (size & blocksize_mask))
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
+
+ /* If this is a write we don't need to check anymore */
+ if (rw & WRITE)
+ continue;
+
+ /*
+		 * Check to make sure we don't have duplicate iov_base values
+		 * in this iovec; if so, return -EINVAL, otherwise we'll get
+		 * csum errors when reading back.
+ */
+ for (i = seg + 1; i < nr_segs; i++) {
+ if (iov[seg].iov_base == iov[i].iov_base)
+ goto out;
+ }
}
retval = 0;
out:
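
The duplicate-iov_base check only applies to direct-IO reads (writes skip it
via the continue above). A read vector like the following, which used to come
back with csum errors, is now rejected with -EINVAL; the path is illustrative
and a 4K sector size is assumed for the alignment requirement:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/btrfs/file", O_RDONLY | O_DIRECT);
	struct iovec iov[2];

	posix_memalign(&buf, 4096, 4096);
	iov[0].iov_base = buf;
	iov[0].iov_len = 4096;
	iov[1].iov_base = buf;		/* duplicate base: now -EINVAL */
	iov[1].iov_len = 4096;

	return readv(fd, iov, 2) < 0 ? 1 : 0;
}
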
@@ -6474,28 +6581,42 @@ out:
return ret;
}
-static void btrfs_truncate(struct inode *inode)
+static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
+ int err = 0;
struct btrfs_trans_handle *trans;
unsigned long nr;
u64 mask = root->sectorsize - 1;
- if (!S_ISREG(inode->i_mode)) {
- WARN_ON(1);
- return;
- }
-
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
- return;
+ return ret;
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ trans = btrfs_start_transaction(root, 5);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_orphan_add(trans, inode);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+ /* Now start a transaction for the truncate */
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = root->orphan_block_rsv;
@@ -6522,29 +6643,38 @@ static void btrfs_truncate(struct inode *inode)
while (1) {
if (!trans) {
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = root->orphan_block_rsv;
}
ret = btrfs_block_rsv_check(trans, root,
root->orphan_block_rsv, 0, 5);
- if (ret) {
- BUG_ON(ret != -EAGAIN);
+ if (ret == -EAGAIN) {
ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
trans = NULL;
continue;
+ } else if (ret) {
+ err = ret;
+ break;
}
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
- if (ret != -EAGAIN)
+ if (ret != -EAGAIN) {
+ err = ret;
break;
+ }
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ break;
+ }
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
@@ -6554,16 +6684,27 @@ static void btrfs_truncate(struct inode *inode)
if (ret == 0 && inode->i_nlink > 0) {
ret = btrfs_orphan_del(trans, inode);
- BUG_ON(ret);
+ if (ret)
+ err = ret;
+ } else if (ret && inode->i_nlink > 0) {
+ /*
+ * Failed to do the truncate, remove us from the in memory
+ * orphan list.
+ */
+ ret = btrfs_orphan_del(NULL, inode);
}
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ if (ret && !err)
+ err = ret;
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
- BUG_ON(ret);
+ if (ret && !err)
+ err = ret;
btrfs_btree_balance_dirty(root, nr);
+
+ return err;
}
/*
@@ -6630,9 +6771,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
- spin_lock_init(&ei->accounting_lock);
atomic_set(&ei->outstanding_extents, 0);
- ei->reserved_extents = 0;
+ atomic_set(&ei->reserved_extents, 0);
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
@@ -6668,7 +6808,7 @@ void btrfs_destroy_inode(struct inode *inode)
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
- WARN_ON(BTRFS_I(inode)->reserved_extents);
+ WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
/*
* This can happen where we create an inode, but somebody else also
@@ -6760,6 +6900,8 @@ void btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_transaction_cachep);
if (btrfs_path_cachep)
kmem_cache_destroy(btrfs_path_cachep);
+ if (btrfs_free_space_cachep)
+ kmem_cache_destroy(btrfs_free_space_cachep);
}
int btrfs_init_cachep(void)
@@ -6788,6 +6930,12 @@ int btrfs_init_cachep(void)
if (!btrfs_path_cachep)
goto fail;
+ btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+ sizeof(struct btrfs_free_space), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_free_space_cachep)
+ goto fail;
+
return 0;
fail:
btrfs_destroy_cachep();
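
The new btrfs_free_space_cachep follows the usual dedicated-slab lifecycle,
which is why the kfree() calls in the free-space-cache.c hunks above became
kmem_cache_free(). In isolation the pattern is:

	struct kmem_cache *cachep;
	struct btrfs_free_space *info;

	/* created once at module init, as in btrfs_init_cachep() above */
	cachep = kmem_cache_create("btrfs_free_space_cache",
				   sizeof(struct btrfs_free_space), 0,
				   SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);

	/* entries must be allocated and freed through the cache ... */
	info = kmem_cache_zalloc(cachep, GFP_NOFS);
	kmem_cache_free(cachep, info);	/* plain kfree() here would be a bug */

	/* ... and the cache destroyed at module exit (btrfs_destroy_cachep) */
	kmem_cache_destroy(cachep);
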
@@ -6806,6 +6954,26 @@ static int btrfs_getattr(struct vfsmount *mnt,
return 0;
}
+/*
+ * If a file is moved, it will inherit the cow and compression flags of the new
+ * directory.
+ */
+static void fixup_inode_flags(struct inode *dir, struct inode *inode)
+{
+ struct btrfs_inode *b_dir = BTRFS_I(dir);
+ struct btrfs_inode *b_inode = BTRFS_I(inode);
+
+ if (b_dir->flags & BTRFS_INODE_NODATACOW)
+ b_inode->flags |= BTRFS_INODE_NODATACOW;
+ else
+ b_inode->flags &= ~BTRFS_INODE_NODATACOW;
+
+ if (b_dir->flags & BTRFS_INODE_COMPRESS)
+ b_inode->flags |= BTRFS_INODE_COMPRESS;
+ else
+ b_inode->flags &= ~BTRFS_INODE_COMPRESS;
+}
+
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
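
fixup_inode_flags() makes the inheritance user-visible on rename: moving a
file into a NOCOW directory flips the file's own flag. A hedged userspace
sketch; the paths are illustrative, and it assumes FS_IOC_GETFLAGS reports
the NOCOW bit:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int flags;
	int fd;

	rename("/mnt/plain/file", "/mnt/nocow-dir/file");
	fd = open("/mnt/nocow-dir/file", O_RDONLY);
	ioctl(fd, FS_IOC_GETFLAGS, &flags);
	printf("nocow: %s\n", (flags & FS_NOCOW_FL) ? "yes" : "no");
	return 0;
}
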
@@ -6854,8 +7022,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* should cover the worst case number of items we'll modify.
*/
trans = btrfs_start_transaction(root, 20);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_notrans;
+ }
btrfs_set_trans_block_group(trans, new_dir);
@@ -6908,11 +7078,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
- btrfs_inc_nlink(old_dentry->d_inode);
- ret = btrfs_unlink_inode(trans, root, old_dir,
- old_dentry->d_inode,
- old_dentry->d_name.name,
- old_dentry->d_name.len);
+ ret = __btrfs_unlink_inode(trans, root, old_dir,
+ old_dentry->d_inode,
+ old_dentry->d_name.name,
+ old_dentry->d_name.len);
+ if (!ret)
+ ret = btrfs_update_inode(trans, root, old_inode);
}
BUG_ON(ret);
@@ -6939,6 +7110,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ fixup_inode_flags(new_dir, old_inode);
+
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
@@ -6952,7 +7125,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
out_fail:
btrfs_end_transaction_throttle(trans, root);
-
+out_notrans:
if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
@@ -7340,7 +7513,6 @@ static const struct address_space_operations btrfs_aops = {
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
- .sync_page = block_sync_page,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
@@ -7356,7 +7528,6 @@ static const struct address_space_operations btrfs_symlink_aops = {
};
static const struct inode_operations btrfs_file_inode_operations = {
- .truncate = btrfs_truncate,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5fdb2ab..ffb48d6c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -40,6 +40,7 @@
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -138,6 +139,24 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
return 0;
}
+static int check_flags(unsigned int flags)
+{
+ if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NOATIME_FL | FS_NODUMP_FL | \
+ FS_SYNC_FL | FS_DIRSYNC_FL | \
+ FS_NOCOMP_FL | FS_COMPR_FL | \
+ FS_NOCOW_FL | FS_COW_FL))
+ return -EOPNOTSUPP;
+
+ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
+ return -EINVAL;
+
+ if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
+ return -EINVAL;
+
+ return 0;
+}
+
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
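
check_flags() above makes the conflicts user-visible: asking for compression
and no-compression (or COW and NOCOW) at once via FS_IOC_SETFLAGS now fails
with -EINVAL rather than being silently mangled. A small sketch; the path is
illustrative:

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDWR);
	unsigned int flags = FS_COMPR_FL | FS_NOCOMP_FL;	/* contradictory */

	/* check_flags() rejects this combination outright */
	return ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0 && errno == EINVAL;
}
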
@@ -153,12 +172,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
- if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
- FS_NOATIME_FL | FS_NODUMP_FL | \
- FS_SYNC_FL | FS_DIRSYNC_FL))
- return -EOPNOTSUPP;
+ ret = check_flags(flags);
+ if (ret)
+ return ret;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
mutex_lock(&inode->i_mutex);
@@ -201,6 +219,22 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
+ /*
+	 * The COMPRESS flag can only be changed by the user, while the
+	 * NOCOMPRESS flag may be set automatically if the compression code
+	 * decides the data won't get any smaller.
+ */
+ if (flags & FS_NOCOMP_FL) {
+ ip->flags &= ~BTRFS_INODE_COMPRESS;
+ ip->flags |= BTRFS_INODE_NOCOMPRESS;
+ } else if (flags & FS_COMPR_FL) {
+ ip->flags |= BTRFS_INODE_COMPRESS;
+ ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ }
+ if (flags & FS_NOCOW_FL)
+ ip->flags |= BTRFS_INODE_NODATACOW;
+ else if (flags & FS_COW_FL)
+ ip->flags &= ~BTRFS_INODE_NODATACOW;
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
@@ -213,9 +247,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
+
+ ret = 0;
out_unlock:
mutex_unlock(&inode->i_mutex);
- return 0;
+ return ret;
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
@@ -225,6 +261,49 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
return put_user(inode->i_generation, arg);
}
+static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_device *device;
+ struct request_queue *q;
+ struct fstrim_range range;
+ u64 minlen = ULLONG_MAX;
+ u64 num_devices = 0;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ if (!device->bdev)
+ continue;
+ q = bdev_get_queue(device->bdev);
+ if (blk_queue_discard(q)) {
+ num_devices++;
+ minlen = min((u64)q->limits.discard_granularity,
+ minlen);
+ }
+ }
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ if (!num_devices)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&range, arg, sizeof(range)))
+ return -EFAULT;
+
+ range.minlen = max(range.minlen, minlen);
+ ret = btrfs_trim_fs(root, &range);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &range, sizeof(range)))
+ return -EFAULT;
+
+ return 0;
+}
+
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
@@ -294,6 +373,10 @@ static noinline int create_subvol(struct btrfs_root *root,
inode_item->nbytes = cpu_to_le64(root->leafsize);
inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
+ root_item.flags = 0;
+ root_item.byte_limit = 0;
+ inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
+
btrfs_set_root_bytenr(&root_item, leaf->start);
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
@@ -409,7 +492,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
if (ret)
goto fail;
- btrfs_orphan_cleanup(pending_snapshot->snap);
+ ret = btrfs_orphan_cleanup(pending_snapshot->snap);
+ if (ret)
+ goto fail;
parent = dget_parent(dentry);
inode = btrfs_lookup_dentry(parent->d_inode, dentry);
@@ -1077,7 +1162,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (flags & ~BTRFS_SUBVOL_RDONLY)
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
down_write(&root->fs_info->subvol_sem);
@@ -2202,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
- struct btrfs_ioctl_space_info *user_dest;
+ struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
BTRFS_BLOCK_GROUP_SYSTEM,
@@ -2348,12 +2433,17 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root;
struct btrfs_trans_handle *trans;
u64 transid;
+ int ret;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
transid = trans->transid;
- btrfs_commit_transaction_async(trans, root, 0);
+ ret = btrfs_commit_transaction_async(trans, root, 0);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
if (argp)
if (copy_to_user(argp, &transid, sizeof(transid)))
@@ -2388,6 +2478,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_setflags(file, argp);
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
+ case FITRIM:
+ return btrfs_ioctl_fitrim(file, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 083a554..a1c9404 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->root_extent_list);
+ trace_btrfs_ordered_extent_add(inode, entry);
+
spin_lock(&tree->lock);
node = tree_insert(&tree->tree, file_offset,
&entry->rb_node);
@@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
struct list_head *cur;
struct btrfs_ordered_sum *sum;
+ trace_btrfs_ordered_extent_put(entry->inode, entry);
+
if (atomic_dec_and_test(&entry->refs)) {
while (!list_empty(&entry->list)) {
cur = entry->list.next;
@@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
spin_lock(&root->fs_info->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
+ trace_btrfs_ordered_extent_remove(inode, entry);
+
/*
* we have no more ordered extents for this inode and
* no dirty pages. We can safely remove it from the
@@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode,
u64 start = entry->file_offset;
u64 end = start + entry->len - 1;
+ trace_btrfs_ordered_extent_start(inode, entry);
+
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 31ade58..199a801 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1724,6 +1724,7 @@ again:
eb = read_tree_block(dest, old_bytenr, blocksize,
old_ptr_gen);
+ BUG_ON(!eb);
btrfs_tree_lock(eb);
if (cow) {
ret = btrfs_cow_block(trans, dest, eb, parent,
@@ -2345,7 +2346,7 @@ struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
root = next->root;
BUG_ON(!root);
- /* no other choice for non-refernce counted tree */
+	/* no other choice for non-reference-counted tree */
if (!root->ref_cows)
return root;
@@ -2513,6 +2514,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
blocksize = btrfs_level_size(root, node->level);
generation = btrfs_node_ptr_generation(upper->eb, slot);
eb = read_tree_block(root, bytenr, blocksize, generation);
+ if (!eb) {
+ err = -EIO;
+ goto next;
+ }
btrfs_tree_lock(eb);
btrfs_set_lock_blocking(eb);
@@ -2670,6 +2675,7 @@ static int get_tree_block_key(struct reloc_control *rc,
BUG_ON(block->key_ready);
eb = read_tree_block(rc->extent_root, block->bytenr,
block->key.objectid, block->key.offset);
+ BUG_ON(!eb);
WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
@@ -4209,7 +4215,7 @@ out:
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
else
- btrfs_orphan_cleanup(fs_root);
+ err = btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6a1086e..6928bff 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -88,7 +88,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
search_key.offset = (u64)-1;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
goto out;
@@ -332,7 +333,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *leaf;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, -1, 1);
if (ret < 0)
goto out;
@@ -471,3 +473,21 @@ again:
btrfs_free_path(path);
return 0;
}
+
+/*
+ * Old btrfs forgets to init root_item->flags and root_item->byte_limit
+ * for subvolumes. To work around this problem, we steal a bit from
+ * root_item->inode_item->flags, and use it to indicate if those fields
+ * have been properly initialized.
+ */
+void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
+{
+ u64 inode_flags = le64_to_cpu(root_item->inode.flags);
+
+ if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
+ inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
+ root_item->inode.flags = cpu_to_le64(inode_flags);
+ root_item->flags = 0;
+ root_item->byte_limit = 0;
+ }
+}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d39a989..0ac712e 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -52,6 +52,9 @@
#include "export.h"
#include "compression.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/btrfs.h>
+
static const struct super_operations btrfs_super_ops;
static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
@@ -156,7 +159,7 @@ enum {
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
- Opt_enospc_debug, Opt_err,
+ Opt_enospc_debug, Opt_subvolrootid, Opt_err,
};
static match_table_t tokens = {
@@ -186,6 +189,7 @@ static match_table_t tokens = {
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
{Opt_enospc_debug, "enospc_debug"},
+ {Opt_subvolrootid, "subvolrootid=%d"},
{Opt_err, NULL},
};
@@ -229,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
break;
case Opt_subvol:
case Opt_subvolid:
+ case Opt_subvolrootid:
case Opt_device:
/*
* These are parsed by btrfs_parse_early_options
@@ -385,7 +390,7 @@ out:
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
void *holder, char **subvol_name, u64 *subvol_objectid,
- struct btrfs_fs_devices **fs_devices)
+ u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *orig, *p;
@@ -426,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
*subvol_objectid = intarg;
}
break;
+ case Opt_subvolrootid:
+ intarg = 0;
+ error = match_int(&args[0], &intarg);
+ if (!error) {
+ /* we want the original fs_tree */
+ if (!intarg)
+ *subvol_rootid =
+ BTRFS_FS_TREE_OBJECTID;
+ else
+ *subvol_rootid = intarg;
+ }
+ break;
case Opt_device:
error = btrfs_scan_one_device(match_strdup(&args[0]),
flags, holder, fs_devices);
@@ -620,6 +637,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_root *root = btrfs_sb(sb);
int ret;
+ trace_btrfs_sync_fs(wait);
+
if (!wait) {
filemap_flush(root->fs_info->btree_inode->i_mapping);
return 0;
@@ -639,6 +658,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
struct btrfs_fs_info *info = root->fs_info;
+ char *compress_type;
if (btrfs_test_opt(root, DEGRADED))
seq_puts(seq, ",degraded");
@@ -657,8 +677,16 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (info->thread_pool_size != min_t(unsigned long,
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
- if (btrfs_test_opt(root, COMPRESS))
- seq_puts(seq, ",compress");
+ if (btrfs_test_opt(root, COMPRESS)) {
+ if (info->compress_type == BTRFS_COMPRESS_ZLIB)
+ compress_type = "zlib";
+ else
+ compress_type = "lzo";
+ if (btrfs_test_opt(root, FORCE_COMPRESS))
+ seq_printf(seq, ",compress-force=%s", compress_type);
+ else
+ seq_printf(seq, ",compress=%s", compress_type);
+ }
if (btrfs_test_opt(root, NOSSD))
seq_puts(seq, ",nossd");
if (btrfs_test_opt(root, SSD_SPREAD))
@@ -673,6 +701,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
+ if (btrfs_test_opt(root, SPACE_CACHE))
+ seq_puts(seq, ",space_cache");
+ if (btrfs_test_opt(root, CLEAR_CACHE))
+ seq_puts(seq, ",clear_cache");
+ if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ seq_puts(seq, ",user_subvol_rm_allowed");
return 0;
}
@@ -716,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
+ u64 subvol_rootid = 0;
int error = 0;
if (!(flags & MS_RDONLY))
@@ -723,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
error = btrfs_parse_early_options(data, mode, fs_type,
&subvol_name, &subvol_objectid,
- &fs_devices);
+ &subvol_rootid, &fs_devices);
if (error)
return ERR_PTR(error);
@@ -787,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
s->s_flags |= MS_ACTIVE;
}
- root = get_default_root(s, subvol_objectid);
- if (IS_ERR(root)) {
- error = PTR_ERR(root);
- deactivate_locked_super(s);
- goto error_free_subvol_name;
- }
/* if they gave us a subvolume name bind mount into that */
if (strcmp(subvol_name, ".")) {
struct dentry *new_root;
+
+ root = get_default_root(s, subvol_rootid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
+
mutex_lock(&root->d_inode->i_mutex);
new_root = lookup_one_len(subvol_name, root,
strlen(subvol_name));
@@ -816,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
}
dput(root);
root = new_root;
+ } else {
+ root = get_default_root(s, subvol_objectid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
}
kfree(subvol_name);
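With the new subvolrootid early option, a subvolume can be picked by tree id at mount time. A minimal sketch via mount(2); the device, mount point, and the id 256 are hypothetical:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* subvolrootid=0 selects the original fs tree, per the parser above */
	if (mount("/dev/sdb1", "/mnt", "btrfs", 0, "subvolrootid=256")) {
		perror("mount");
		return 1;
	}
	return 0;
}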
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3d73c8d..c571734 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -32,10 +32,8 @@
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(transaction->use_count == 0);
- transaction->use_count--;
- if (transaction->use_count == 0) {
- list_del_init(&transaction->list);
+ WARN_ON(atomic_read(&transaction->use_count) == 0);
+ if (atomic_dec_and_test(&transaction->use_count)) {
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
@@ -57,16 +55,17 @@ static noinline int join_transaction(struct btrfs_root *root)
if (!cur_trans) {
cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
GFP_NOFS);
- BUG_ON(!cur_trans);
+ if (!cur_trans)
+ return -ENOMEM;
root->fs_info->generation++;
- cur_trans->num_writers = 1;
+ atomic_set(&cur_trans->num_writers, 1);
cur_trans->num_joined = 0;
cur_trans->transid = root->fs_info->generation;
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
cur_trans->in_commit = 0;
cur_trans->blocked = 0;
- cur_trans->use_count = 1;
+ atomic_set(&cur_trans->use_count, 1);
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();
@@ -87,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
} else {
- cur_trans->num_writers++;
+ atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
}
@@ -144,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
DEFINE_WAIT(wait);
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
@@ -180,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
+ int retries = 0;
int ret;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -195,10 +195,15 @@ again:
wait_current_trans(root);
ret = join_transaction(root);
- BUG_ON(ret);
+ if (ret < 0) {
+ kmem_cache_free(btrfs_trans_handle_cachep, h);
+ if (type != TRANS_JOIN_NOLOCK)
+ mutex_unlock(&root->fs_info->trans_mutex);
+ return ERR_PTR(ret);
+ }
cur_trans = root->fs_info->running_transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
if (type != TRANS_JOIN_NOLOCK)
mutex_unlock(&root->fs_info->trans_mutex);
@@ -218,10 +223,18 @@ again:
if (num_items > 0) {
ret = btrfs_trans_reserve_metadata(h, root, num_items);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN && !retries) {
+ retries++;
btrfs_commit_transaction(h, root);
goto again;
+ } else if (ret == -EAGAIN) {
+ /*
+ * We have already retried and still got EAGAIN, so we
+ * really don't have space; set ret to -ENOSPC.
+ */
+ ret = -ENOSPC;
}
+
if (ret < 0) {
btrfs_end_transaction(h, root);
return ERR_PTR(ret);
@@ -321,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
goto out_unlock; /* nothing committing|committed */
}
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, cur_trans);
@@ -451,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
wake_up_process(info->transaction_kthread);
}
- if (lock)
- mutex_lock(&info->trans_mutex);
WARN_ON(cur_trans != info->running_transaction);
- WARN_ON(cur_trans->num_writers < 1);
- cur_trans->num_writers--;
+ WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
+ atomic_dec(&cur_trans->num_writers);
smp_mb();
if (waitqueue_active(&cur_trans->writer_wait))
wake_up(&cur_trans->writer_wait);
put_transaction(cur_trans);
- if (lock)
- mutex_unlock(&info->trans_mutex);
if (current->journal_info == trans)
current->journal_info = NULL;
@@ -970,6 +979,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
+ btrfs_check_and_init_root_item(new_root_item);
root_flags = btrfs_root_flags(new_root_item);
if (pending->readonly)
@@ -1156,7 +1166,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans;
ac = kmalloc(sizeof(*ac), GFP_NOFS);
- BUG_ON(!ac);
+ if (!ac)
+ return -ENOMEM;
INIT_DELAYED_WORK(&ac->work, do_async_commit);
ac->root = root;
@@ -1170,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
/* take transaction reference */
mutex_lock(&root->fs_info->trans_mutex);
cur_trans = trans->transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
@@ -1229,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_lock(&root->fs_info->trans_mutex);
if (cur_trans->in_commit) {
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
@@ -1251,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
- prev_trans->use_count++;
+ atomic_inc(&prev_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, prev_trans);
@@ -1292,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
TASK_UNINTERRUPTIBLE);
smp_mb();
- if (cur_trans->num_writers > 1)
+ if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);
mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&cur_trans->writer_wait, &wait);
- } while (cur_trans->num_writers > 1 ||
+ } while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
ret = create_pending_snapshots(trans, root->fs_info);
@@ -1386,9 +1397,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
wake_up(&cur_trans->commit_wait);
+ list_del_init(&cur_trans->list);
put_transaction(cur_trans);
put_transaction(cur_trans);
+ trace_btrfs_transaction_commit(root);
+
mutex_unlock(&root->fs_info->trans_mutex);
if (current->journal_info == trans)
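Converting use_count and num_writers to atomic_t is what lets the end-transaction path above drop trans_mutex. A minimal sketch of the refcounting pattern the patch adopts; the names are illustrative, not the btrfs ones:

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_t refs;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		atomic_set(&o->refs, 1);	/* caller owns the first ref */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refs);		/* no lock needed to take a ref */
}

static void obj_put(struct obj *o)
{
	/* exactly one caller sees the count hit zero, so the free is safe */
	if (atomic_dec_and_test(&o->refs))
		kfree(o);
}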
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 229a594..e441acc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -27,11 +27,11 @@ struct btrfs_transaction {
* total writers in this transaction, it must be zero before the
* transaction can end
*/
- unsigned long num_writers;
+ atomic_t num_writers;
unsigned long num_joined;
int in_commit;
- int use_count;
+ atomic_t use_count;
int commit_done;
int blocked;
struct list_head list;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a4bbb85..c50271a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -799,12 +799,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
struct inode *dir;
int ret;
struct btrfs_inode_ref *ref;
- struct btrfs_dir_item *di;
struct inode *inode;
char *name;
int namelen;
unsigned long ref_ptr;
unsigned long ref_end;
+ int search_done = 0;
/*
* it is possible that we didn't log all the parent directories
@@ -845,7 +845,10 @@ again:
* existing back reference, and we don't want to create
* dangling pointers in the directory.
*/
-conflict_again:
+
+ if (search_done)
+ goto insert;
+
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
if (ret == 0) {
char *victim_name;
@@ -886,37 +889,21 @@ conflict_again:
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
- kfree(victim_name);
- btrfs_release_path(root, path);
- goto conflict_again;
}
kfree(victim_name);
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
}
BUG_ON(ret);
- }
- btrfs_release_path(root, path);
-
- /* look for a conflicting sequence number */
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- btrfs_inode_ref_index(eb, ref),
- name, namelen, 0);
- if (di && !IS_ERR(di)) {
- ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
- }
- btrfs_release_path(root, path);
-
- /* look for a conflicting name */
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
- name, namelen, 0);
- if (di && !IS_ERR(di)) {
- ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ /*
+ * NOTE: we have searched the root tree and checked the
+ * corresponding ref, so it does not need to be checked again.
+ */
+ search_done = 1;
}
btrfs_release_path(root, path);
+insert:
/* insert our name */
ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
btrfs_inode_ref_index(eb, ref));
@@ -1286,6 +1273,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
+ if (verify_dir_item(root, eb, di))
+ return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
BUG_ON(ret);
@@ -1412,6 +1401,11 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
+ if (verify_dir_item(root, eb, di)) {
+ ret = -EIO;
+ goto out;
+ }
+
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
@@ -1821,7 +1815,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
int orig_level;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
level = btrfs_header_level(log->node);
orig_level = level;
@@ -3107,9 +3102,11 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
.stage = 0,
};
- fs_info->log_root_recovering = 1;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
+ fs_info->log_root_recovering = 1;
trans = btrfs_start_transaction(fs_info->tree_root, 0);
BUG_ON(IS_ERR(trans));
@@ -3117,7 +3114,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
wc.trans = trans;
wc.pin = 1;
- walk_log_tree(trans, log_root_tree, &wc);
+ ret = walk_log_tree(trans, log_root_tree, &wc);
+ BUG_ON(ret);
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3141,8 +3139,7 @@ again:
log = btrfs_read_fs_root_no_radix(log_root_tree,
&found_key);
- BUG_ON(!log);
-
+ BUG_ON(IS_ERR(log));
tmp_key.objectid = found_key.offset;
tmp_key.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb8..309a57b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -33,17 +33,6 @@
#include "volumes.h"
#include "async-thread.h"
-struct map_lookup {
- u64 type;
- int io_align;
- int io_width;
- int stripe_len;
- int sector_size;
- int num_stripes;
- int sub_stripes;
- struct btrfs_bio_stripe stripes[];
-};
-
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device);
@@ -162,7 +151,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
struct bio *cur;
int again = 0;
unsigned long num_run;
- unsigned long num_sync_run;
unsigned long batch_run = 0;
unsigned long limit;
unsigned long last_waited = 0;
@@ -173,11 +161,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
- /* we want to make sure that every time we switch from the sync
- * list to the normal list, we unplug
- */
- num_sync_run = 0;
-
loop:
spin_lock(&device->io_lock);
@@ -223,15 +206,6 @@ loop_lock:
spin_unlock(&device->io_lock);
- /*
- * if we're doing the regular priority list, make sure we unplug
- * for any high prio bios we've sent down
- */
- if (pending_bios == &device->pending_bios && num_sync_run > 0) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
-
while (pending) {
rmb();
@@ -259,19 +233,11 @@ loop_lock:
BUG_ON(atomic_read(&cur->bi_cnt) == 0);
- if (cur->bi_rw & REQ_SYNC)
- num_sync_run++;
-
submit_bio(cur->bi_rw, cur);
num_run++;
batch_run++;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
/*
* we made progress, there is more work to do and the bdi
@@ -304,13 +270,8 @@ loop_lock:
* against it before looping
*/
last_waited = ioc->last_waited;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
continue;
}
spin_lock(&device->io_lock);
@@ -323,22 +284,6 @@ loop_lock:
}
}
- if (num_sync_run) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
- /*
- * IO has already been through a long path to get here. Checksumming,
- * async helper threads, perhaps compression. We've done a pretty
- * good job of collecting a batch of IO and should just unplug
- * the device right away.
- *
- * This will help anyone who is waiting on the IO, they might have
- * already unplugged, but managed to do so before the bio they
- * cared about found its way down here.
- */
- blk_run_backing_dev(bdi, NULL);
-
cond_resched();
if (again)
goto loop;
@@ -1923,6 +1868,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
BUG_ON(ret);
+ trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
+
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
BUG_ON(ret);
@@ -2650,6 +2597,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
*num_bytes = chunk_bytes_by_type(type, calc_size,
map->num_stripes, sub_stripes);
+ trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
+
em = alloc_extent_map(GFP_NOFS);
if (!em) {
ret = -ENOMEM;
@@ -2758,6 +2707,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
item_size);
BUG_ON(ret);
}
+
kfree(chunk);
return 0;
}
@@ -2955,14 +2905,17 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret,
- int mirror_num, struct page *unplug_page)
+ int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
u64 offset;
u64 stripe_offset;
+ u64 stripe_end_offset;
u64 stripe_nr;
+ u64 stripe_nr_orig;
+ u64 stripe_nr_end;
int stripes_allocated = 8;
int stripes_required = 1;
int stripe_index;
@@ -2971,7 +2924,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
int max_errors = 0;
struct btrfs_multi_bio *multi = NULL;
- if (multi_ret && !(rw & REQ_WRITE))
+ if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
stripes_allocated = 1;
again:
if (multi_ret) {
@@ -2987,11 +2940,6 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page) {
- kfree(multi);
- return 0;
- }
-
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
(unsigned long long)logical,
@@ -3017,7 +2965,15 @@ again:
max_errors = 1;
}
}
- if (multi_ret && (rw & REQ_WRITE) &&
+ if (rw & REQ_DISCARD) {
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ stripes_required = map->num_stripes;
+ }
+ }
+ if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
stripes_allocated < stripes_required) {
stripes_allocated = map->num_stripes;
free_extent_map(em);
@@ -3037,23 +2993,37 @@ again:
/* stripe_offset is the offset of this block in its stripe*/
stripe_offset = offset - stripe_offset;
- if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_DUP)) {
+ if (rw & REQ_DISCARD)
+ *length = min_t(u64, em->len - offset, *length);
+ else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_DUP)) {
/* we limit the length of each bio to what fits in a stripe */
*length = min_t(u64, em->len - offset,
- map->stripe_len - stripe_offset);
+ map->stripe_len - stripe_offset);
} else {
*length = em->len - offset;
}
- if (!multi_ret && !unplug_page)
+ if (!multi_ret)
goto out;
num_stripes = 1;
stripe_index = 0;
- if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (unplug_page || (rw & REQ_WRITE))
+ stripe_nr_orig = stripe_nr;
+ stripe_nr_end = (offset + *length + map->stripe_len - 1) &
+ (~(map->stripe_len - 1));
+ do_div(stripe_nr_end, map->stripe_len);
+ stripe_end_offset = stripe_nr_end * map->stripe_len -
+ (offset + *length);
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ if (rw & REQ_DISCARD)
+ num_stripes = min_t(u64, map->num_stripes,
+ stripe_nr_end - stripe_nr_orig);
+ stripe_index = do_div(stripe_nr, map->num_stripes);
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
+ if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -3064,7 +3034,7 @@ again:
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (rw & REQ_WRITE)
+ if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -3075,8 +3045,12 @@ again:
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
- if (unplug_page || (rw & REQ_WRITE))
+ if (rw & REQ_WRITE)
num_stripes = map->sub_stripes;
+ else if (rw & REQ_DISCARD)
+ num_stripes = min_t(u64, map->sub_stripes *
+ (stripe_nr_end - stripe_nr_orig),
+ map->num_stripes);
else if (mirror_num)
stripe_index += mirror_num - 1;
else {
@@ -3094,24 +3068,101 @@ again:
}
BUG_ON(stripe_index >= map->num_stripes);
- for (i = 0; i < num_stripes; i++) {
- if (unplug_page) {
- struct btrfs_device *device;
- struct backing_dev_info *bdi;
-
- device = map->stripes[stripe_index].dev;
- if (device->bdev) {
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, unplug_page);
- }
- } else {
+ if (rw & REQ_DISCARD) {
+ for (i = 0; i < num_stripes; i++) {
multi->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
multi->stripes[i].dev = map->stripes[stripe_index].dev;
+
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ u64 stripes;
+ u32 last_stripe = 0;
+ int j;
+
+ div_u64_rem(stripe_nr_end - 1,
+ map->num_stripes,
+ &last_stripe);
+
+ for (j = 0; j < map->num_stripes; j++) {
+ u32 test;
+
+ div_u64_rem(stripe_nr_end - 1 - j,
+ map->num_stripes, &test);
+ if (test == stripe_index)
+ break;
+ }
+ stripes = stripe_nr_end - 1 - j;
+ do_div(stripes, map->num_stripes);
+ multi->stripes[i].length = map->stripe_len *
+ (stripes - stripe_nr + 1);
+
+ if (i == 0) {
+ multi->stripes[i].length -=
+ stripe_offset;
+ stripe_offset = 0;
+ }
+ if (stripe_index == last_stripe)
+ multi->stripes[i].length -=
+ stripe_end_offset;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+ u64 stripes;
+ int j;
+ int factor = map->num_stripes /
+ map->sub_stripes;
+ u32 last_stripe = 0;
+
+ div_u64_rem(stripe_nr_end - 1,
+ factor, &last_stripe);
+ last_stripe *= map->sub_stripes;
+
+ for (j = 0; j < factor; j++) {
+ u32 test;
+
+ div_u64_rem(stripe_nr_end - 1 - j,
+ factor, &test);
+
+ if (test ==
+ stripe_index / map->sub_stripes)
+ break;
+ }
+ stripes = stripe_nr_end - 1 - j;
+ do_div(stripes, factor);
+ multi->stripes[i].length = map->stripe_len *
+ (stripes - stripe_nr + 1);
+
+ if (i < map->sub_stripes) {
+ multi->stripes[i].length -=
+ stripe_offset;
+ if (i == map->sub_stripes - 1)
+ stripe_offset = 0;
+ }
+ if (stripe_index >= last_stripe &&
+ stripe_index <= (last_stripe +
+ map->sub_stripes - 1)) {
+ multi->stripes[i].length -=
+ stripe_end_offset;
+ }
+ } else
+ multi->stripes[i].length = *length;
+
+ stripe_index++;
+ if (stripe_index == map->num_stripes) {
+ /* This could only happen for RAID0/10 */
+ stripe_index = 0;
+ stripe_nr++;
+ }
+ }
+ } else {
+ for (i = 0; i < num_stripes; i++) {
+ multi->stripes[i].physical =
+ map->stripes[stripe_index].physical +
+ stripe_offset +
+ stripe_nr * map->stripe_len;
+ multi->stripes[i].dev =
+ map->stripes[stripe_index].dev;
+ stripe_index++;
}
- stripe_index++;
}
if (multi_ret) {
*multi_ret = multi;
@@ -3128,7 +3179,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
struct btrfs_multi_bio **multi_ret, int mirror_num)
{
return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
- mirror_num, NULL);
+ mirror_num);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3247,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
return 0;
}
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
- u64 logical, struct page *page)
-{
- u64 length = PAGE_CACHE_SIZE;
- return __btrfs_map_block(map_tree, READ, logical, &length,
- NULL, 0, page);
-}
-
static void end_bio_multi_stripe(struct bio *bio, int err)
{
struct btrfs_multi_bio *multi = bio->bi_private;
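The discard mapping above needs the first and last stripes touched by a byte range. A worked sketch of that arithmetic with hypothetical numbers (stripe_len = 64 KiB, offset = 150 KiB into the chunk, length = 200 KiB):

#include <stdio.h>

int main(void)
{
	unsigned long long stripe_len = 64ULL << 10;
	unsigned long long offset = 150ULL << 10, length = 200ULL << 10;

	unsigned long long stripe_nr = offset / stripe_len;		/* 2 */
	unsigned long long stripe_offset =
		offset - stripe_nr * stripe_len;			/* 22 KiB */
	unsigned long long stripe_nr_end =
		((offset + length + stripe_len - 1) & ~(stripe_len - 1))
		/ stripe_len;						/* 6 */
	unsigned long long stripe_end_offset =
		stripe_nr_end * stripe_len - (offset + length);		/* 34 KiB */

	printf("nr=%llu off=%llu nr_end=%llu end_off=%llu\n",
	       stripe_nr, stripe_offset, stripe_nr_end, stripe_end_offset);
	return 0;
}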
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 7fb59d4..cc2eada 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -126,6 +126,7 @@ struct btrfs_fs_devices {
struct btrfs_bio_stripe {
struct btrfs_device *dev;
u64 physical;
+ u64 length; /* only used for discard mappings */
};
struct btrfs_multi_bio {
@@ -145,6 +146,17 @@ struct btrfs_device_info {
u64 max_avail;
};
+struct map_lookup {
+ u64 type;
+ int io_align;
+ int io_width;
+ int stripe_len;
+ int sector_size;
+ int num_stripes;
+ int sub_stripes;
+ struct btrfs_bio_stripe stripes[];
+};
+
/* Used to sort the devices by max_avail(descending sort) */
int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index d779cef..cfd6605 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
- int ret = 0, slot, advance;
+ int ret = 0, slot;
size_t total_size = 0, size_left = size;
unsigned long name_ptr;
size_t name_len;
- u32 nritems;
/*
* ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
+
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
/* this is where we start walking through the path */
- if (advance || slot >= nritems) {
+ if (slot >= btrfs_header_nritems(leaf)) {
/*
* if we've reached the last slot in this leaf we need
* to go to the next leaf and reset everything
*/
- if (slot >= nritems-1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- /*
- * just walking through the slots on this leaf
- */
- slot++;
- path->slots[0]++;
- }
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
btrfs_item_key_to_cpu(leaf, &found_key, slot);
@@ -242,13 +231,15 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
break;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, di))
+ continue;
name_len = btrfs_dir_name_len(leaf, di);
total_size += name_len + 1;
/* we are just looking for how big our buffer needs to be */
if (!size)
- continue;
+ goto next;
if (!buffer || (name_len + 1) > size_left) {
ret = -ERANGE;
@@ -261,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
size_left -= name_len + 1;
buffer += name_len + 1;
+next:
+ path->slots[0]++;
}
ret = total_size;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index f5ec2d4..faccd47 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -57,7 +57,8 @@ static struct list_head *zlib_alloc_workspace(void)
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize(
+ MAX_WBITS, MAX_MEM_LEVEL));
workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!workspace->def_strm.workspace ||
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76..a08bb8e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
}
EXPORT_SYMBOL(init_buffer);
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
{
- struct block_device *bd;
- struct buffer_head *bh
- = container_of(word, struct buffer_head, b_state);
-
- smp_mb();
- bd = bh->b_bdev;
- if (bd)
- blk_run_address_space(bd->bd_inode->i_mapping);
io_schedule();
return 0;
}
void __lock_buffer(struct buffer_head *bh)
{
- wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
struct list_head tmp;
- struct address_space *mapping, *prev_mapping = NULL;
+ struct address_space *mapping;
int err = 0, err2;
+ struct blk_plug plug;
INIT_LIST_HEAD(&tmp);
+ blk_start_plug(&plug);
spin_lock(lock);
while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* still in flight on potentially older
* contents.
*/
- write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+ write_dirty_buffer(bh, WRITE_SYNC);
/*
* Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* wait_on_buffer() will do that for us
* through sync_buffer().
*/
- if (prev_mapping && prev_mapping != mapping)
- blk_run_address_space(prev_mapping);
- prev_mapping = mapping;
-
brelse(bh);
spin_lock(lock);
}
}
}
+ spin_unlock(lock);
+ blk_finish_plug(&plug);
+ spin_lock(lock);
+
while (!list_empty(&tmp)) {
bh = BH_ENTRY(tmp.prev);
get_bh(bh);
@@ -1144,7 +1138,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
* inode list.
*
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
{
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
* prevents this contention from occurring.
*
* If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
*/
static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
@@ -3138,17 +3126,6 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-void block_sync_page(struct page *page)
-{
- struct address_space *mapping;
-
- smp_mb();
- mapping = page_mapping(page);
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
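fsync_buffers_list() now batches its writes under an on-stack plug in place of the removed per-device unplug calls. The general shape of the API, as a hedged kernel-side sketch (submit_my_bios() is a hypothetical stand-in for the submit_bh() calls):

#include <linux/blkdev.h>

void submit_my_bios(void);	/* hypothetical I/O submitter */

static void write_batch(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);	/* bios queue up on a per-task plug list */
	submit_my_bios();
	blk_finish_plug(&plug);	/* the whole batch is pushed to the driver */
}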
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 37fe101..1064805 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -197,7 +197,7 @@ struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
}
/*
- * update the auxilliary data for an object object on disk
+ * update the auxiliary data for an object on disk
*/
static void cachefiles_update_object(struct fscache_object *_object)
{
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 561438b..e159c52 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -24,7 +24,7 @@
* context needs to be associated with the osd write during writeback.
*
* Similarly, struct ceph_inode_info maintains a set of counters to
- * count dirty pages on the inode. In the absense of snapshots,
+ * count dirty pages on the inode. In the absence of snapshots,
* i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
*
* When a snapshot is taken (that is, when the client receives
@@ -92,7 +92,7 @@ static int ceph_set_page_dirty(struct page *page)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
if (ci->i_wrbuffer_ref == 0)
- igrab(inode);
+ ihold(inode);
++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6b61ded..5323c33 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -765,7 +765,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
if (touch) {
struct rb_node *q;
- /* touch this + preceeding caps */
+ /* touch this + preceding caps */
__touch_cap(cap);
for (q = rb_first(&ci->i_caps); q != p;
q = rb_next(q)) {
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 08f65fa..0dba691 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -210,8 +210,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_congestion_kb)
goto out;
- dout("a\n");
-
snprintf(name, sizeof(name), "../../bdi/%s",
dev_name(fsc->backing_dev_info.dev));
fsc->debugfs_bdi =
@@ -221,7 +219,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_bdi)
goto out;
- dout("b\n");
fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
0600,
fsc->client->debugfs_dir,
@@ -230,7 +227,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_mdsmap)
goto out;
- dout("ca\n");
fsc->debugfs_mdsc = debugfs_create_file("mdsc",
0600,
fsc->client->debugfs_dir,
@@ -239,7 +235,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_mdsc)
goto out;
- dout("da\n");
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
@@ -248,7 +243,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_caps)
goto out;
- dout("ea\n");
fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
0600,
fsc->client->debugfs_dir,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ebafa65..1a867a3 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -161,7 +161,7 @@ more:
filp->f_pos = di->offset;
err = filldir(dirent, dentry->d_name.name,
dentry->d_name.len, di->offset,
- dentry->d_inode->i_ino,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
dentry->d_inode->i_mode >> 12);
if (last) {
@@ -245,15 +245,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
dout("readdir off 0 -> '.'\n");
if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
- inode->i_ino, inode->i_mode >> 12) < 0)
+ ceph_translate_ino(inode->i_sb, inode->i_ino),
+ inode->i_mode >> 12) < 0)
return 0;
filp->f_pos = 1;
off = 1;
}
if (filp->f_pos == 1) {
+ ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino;
dout("readdir off 1 -> '..'\n");
if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
- filp->f_dentry->d_parent->d_inode->i_ino,
+ ceph_translate_ino(inode->i_sb, ino),
inode->i_mode >> 12) < 0)
return 0;
filp->f_pos = 2;
@@ -377,7 +379,8 @@ more:
if (filldir(dirent,
rinfo->dir_dname[off - fi->offset],
rinfo->dir_dname_len[off - fi->offset],
- pos, ino, ftype) < 0) {
+ pos,
+ ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
dout("filldir stopping us...\n");
return 0;
}
@@ -1024,14 +1027,13 @@ out_touch:
}
/*
- * When a dentry is released, clear the dir I_COMPLETE if it was part
- * of the current dir gen or if this is in the snapshot namespace.
+ * Release our ceph_dentry_info.
*/
-static void ceph_dentry_release(struct dentry *dentry)
+static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- dout("dentry_release %p\n", dentry);
+ dout("d_release %p\n", dentry);
if (di) {
ceph_dentry_lru_del(dentry);
if (di->lease_session)
@@ -1256,14 +1258,14 @@ const struct inode_operations ceph_dir_iops = {
const struct dentry_operations ceph_dentry_ops = {
.d_revalidate = ceph_d_revalidate,
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
const struct dentry_operations ceph_snapdir_dentry_ops = {
.d_revalidate = ceph_snapdir_d_revalidate,
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
const struct dentry_operations ceph_snap_dentry_ops = {
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7d0e4a8..159b512 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -564,11 +564,19 @@ more:
* start_request so that a tid has been assigned.
*/
spin_lock(&ci->i_unsafe_lock);
- list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
+ list_add_tail(&req->r_unsafe_item,
+ &ci->i_unsafe_writes);
spin_unlock(&ci->i_unsafe_lock);
ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
}
+
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (ret < 0 && req->r_safe_callback) {
+ spin_lock(&ci->i_unsafe_lock);
+ list_del_init(&req->r_unsafe_item);
+ spin_unlock(&ci->i_unsafe_lock);
+ ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
+ }
}
if (file->f_flags & O_DIRECT)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 193bfa5..b54c97da 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -36,6 +36,13 @@ static void ceph_vmtruncate_work(struct work_struct *work);
/*
* find or create an inode, given the ceph ino number
*/
+static int ceph_set_ino_cb(struct inode *inode, void *data)
+{
+ ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
+ inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
+ return 0;
+}
+
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
struct inode *inode;
@@ -1030,9 +1037,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
dout("fill_trace doing d_move %p -> %p\n",
req->r_old_dentry, dn);
- /* d_move screws up d_subdirs order */
- ceph_i_clear(dir, CEPH_I_COMPLETE);
-
d_move(req->r_old_dentry, dn);
dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
@@ -1044,12 +1048,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(dn);
- /* take overwritten dentry's readdir offset */
- dout("dn %p gets %p offset %lld (old offset %lld)\n",
- req->r_old_dentry, dn, ceph_dentry(dn)->offset,
+ /*
+ * d_move() puts the renamed dentry at the end of
+ * d_subdirs. We need to assign it an appropriate
+ * directory offset so readdir behaves correctly while
+ * the directory is marked I_COMPLETE.
+ */
+ ceph_set_dentry_offset(req->r_old_dentry);
+ dout("dn %p gets new offset %lld\n", req->r_old_dentry,
ceph_dentry(req->r_old_dentry)->offset);
- ceph_dentry(req->r_old_dentry)->offset =
- ceph_dentry(dn)->offset;
dn = req->r_old_dentry; /* use old_dentry */
in = dn->d_inode;
@@ -1809,7 +1816,7 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
if (!err) {
generic_fillattr(inode, stat);
- stat->ino = inode->i_ino;
+ stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
if (ceph_snap(inode) != CEPH_NOSNAP)
stat->dev = ceph_snap(inode);
else
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a1ee8fa..f60b07b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3215,9 +3215,15 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
+ dout("mdsc_destroy %p\n", mdsc);
ceph_mdsc_stop(mdsc);
+
+ /* flush out any connection work with references to us */
+ ceph_msgr_flush();
+
fsc->mdsc = NULL;
kfree(mdsc);
+ dout("mdsc_destroy %p done\n", mdsc);
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f40b913..e86ec11 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -342,7 +342,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
num = 0;
snapc->seq = realm->seq;
if (parent) {
- /* include any of parent's snaps occuring _after_ my
+ /* include any of parent's snaps occurring _after_ my
parent became my parent */
for (i = 0; i < parent->cached_context->num_snaps; i++)
if (parent->cached_context->snaps[i] >=
@@ -463,8 +463,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
capsnap, snapc);
- igrab(inode);
-
+ ihold(inode);
+
atomic_set(&capsnap->nref, 1);
capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9c50854..f2f77fd 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -131,6 +131,7 @@ enum {
Opt_rbytes,
Opt_norbytes,
Opt_noasyncreaddir,
+ Opt_ino32,
};
static match_table_t fsopt_tokens = {
@@ -150,6 +151,7 @@ static match_table_t fsopt_tokens = {
{Opt_rbytes, "rbytes"},
{Opt_norbytes, "norbytes"},
{Opt_noasyncreaddir, "noasyncreaddir"},
+ {Opt_ino32, "ino32"},
{-1, NULL}
};
@@ -225,6 +227,9 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_noasyncreaddir:
fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
break;
+ case Opt_ino32:
+ fsopt->flags |= CEPH_MOUNT_OPT_INO32;
+ break;
default:
BUG_ON(token);
}
@@ -288,7 +293,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->sb_flags = flags;
fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
- fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
+ fsopt->rsize = CEPH_RSIZE_DEFAULT;
fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
@@ -348,7 +353,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (opt->name)
seq_printf(m, ",name=%s", opt->name);
- if (opt->secret)
+ if (opt->key)
seq_puts(m, ",secret=<hidden>");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
@@ -370,7 +375,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
- if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
+ if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20b907d..619fe71 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -27,6 +27,7 @@
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
+#define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */
#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
@@ -35,6 +36,7 @@
#define ceph_test_mount_opt(fsc, opt) \
(!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
+#define CEPH_RSIZE_DEFAULT (512*1024) /* readahead */
#define CEPH_MAX_READDIR_DEFAULT 1024
#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
@@ -319,6 +321,16 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
return container_of(inode, struct ceph_inode_info, vfs_inode);
}
+static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
+{
+ return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
+}
+
+static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
+{
+ return (struct ceph_fs_client *)sb->s_fs_info;
+}
+
static inline struct ceph_vino ceph_vino(struct inode *inode)
{
return ceph_inode(inode)->i_vino;
@@ -327,19 +339,49 @@ static inline struct ceph_vino ceph_vino(struct inode *inode)
/*
* ino_t is <64 bits on many architectures, blech.
*
- * don't include snap in ino hash, at least for now.
+ *                i_ino (kernel inode)   st_ino (userspace)
+ * i386           32                     32
+ * x86_64+ino32   64                     32
+ * x86_64         64                     64
+ */
+static inline u32 ceph_ino_to_ino32(ino_t ino)
+{
+ ino ^= ino >> (sizeof(ino) * 8 - 32);
+ if (!ino)
+ ino = 1;
+ return ino;
+}
+
+/*
+ * kernel i_ino value
*/
static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
{
ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
#if BITS_PER_LONG == 32
- ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
- if (!ino)
- ino = 1;
+ ino = ceph_ino_to_ino32(ino);
#endif
return ino;
}
+/*
+ * user-visible ino (stat, filldir)
+ */
+#if BITS_PER_LONG == 32
+static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
+{
+ return ino;
+}
+#else
+static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
+{
+ if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))
+ ino = ceph_ino_to_ino32(ino);
+ return ino;
+}
+#endif
+
+
/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
@@ -428,13 +470,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
return ((loff_t)frag << 32) | (loff_t)off;
}
-static inline int ceph_set_ino_cb(struct inode *inode, void *data)
-{
- ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
- inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
- return 0;
-}
-
/*
* caps helpers
*/
@@ -503,15 +538,6 @@ extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
-static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
-{
- return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
-}
-
-static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
-{
- return (struct ceph_fs_client *)sb->s_fs_info;
-}
/*
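ceph_ino_to_ino32() above folds the high half of a 64-bit ino into the low half so the new ino32 mount option can hand userspace stable 32-bit inode numbers. A standalone sketch of the same fold, with a hypothetical input value:

#include <stdio.h>
#include <stdint.h>

static uint32_t ino_to_ino32(uint64_t ino)
{
	ino ^= ino >> 32;	/* fold the high half into the low half */
	if (!ino)
		ino = 1;	/* 0 is not a usable inode number */
	return (uint32_t)ino;
}

int main(void)
{
	/* 0x9abcdef0 ^ 0x12345678 == 0x88888888 */
	printf("%#x\n", (unsigned)ino_to_ino32(0x123456789abcdef0ULL));
	return 0;
}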
diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
index 7f7fa3c..ea940b1 100644
--- a/fs/cifs/AUTHORS
+++ b/fs/cifs/AUTHORS
@@ -35,7 +35,7 @@ Adrian Bunk (kcalloc cleanups)
Miklos Szeredi
Kazeon team for various fixes especially for 2.4 version.
Asser Ferno (Change Notify support)
-Shaggy (Dave Kleikamp) for inumerable small fs suggestions and some good cleanup
+Shaggy (Dave Kleikamp) for innumerable small fs suggestions and some good cleanup
Gunter Kukkukk (testing and suggestions for support of old servers)
Igor Mammedov (DFS support)
Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
diff --git a/fs/cifs/README b/fs/cifs/README
index fe16835..74ab165f 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to
support and want to map the uid and gid fields
to values supplied at mount (rather than the
actual values, then set this to zero. (default 1)
-Experimental When set to 1 used to enable certain experimental
- features (currently enables multipage writes
- when signing is enabled, the multipage write
- performance enhancement was disabled when
- signing turned on in case buffer was modified
- just before it was sent, also this flag will
- be used to use the new experimental directory change
- notification code). When set to 2 enables
- an additional experimental feature, "raw ntlmssp"
- session establishment support (which allows
- specifying "sec=ntlmssp" on mount). The Linux cifs
- module will use ntlmv2 authentication encapsulated
- in "raw ntlmssp" (not using SPNEGO) when
- "sec=ntlmssp" is specified on mount.
- This support also requires building cifs with
- the CONFIG_CIFS_EXPERIMENTAL configuration flag.
These experimental features and tracing can be enabled by changing flags in
/proc/fs/cifs (after the cifs module has been installed or built into the
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e654dfd..53d57a3 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
*/
struct cifs_server_key {
uint16_t family; /* address family */
- uint16_t port; /* IP port */
+ __be16 port; /* IP port */
union {
struct in_addr ipv4_addr;
struct in6_addr ipv6_addr;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 65829d3..30d01bc 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
- proc_create("Experimental", 0, proc_fs_cifs,
- &cifs_experimental_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
- remove_proc_entry("Experimental", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
remove_proc_entry("fs/cifs", NULL);
}
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
.write = cifs_oplock_proc_write,
};
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", experimEnabled);
- return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- experimEnabled = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- experimEnabled = 1;
- else if (c == '2')
- experimEnabled = 2;
-
- return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_experimental_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_experimental_proc_write,
-};
-
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 0a265ad..2b68ac5 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -53,7 +53,7 @@ void cifs_dfs_release_automount_timer(void)
*
* Extracts sharename from full UNC.
* i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the begining of DFS node refferal
+ * name and fixes up a missing '\' at the beginning of a DFS node referral
* if necessary.
* Returns pointer to share name on success or ERR_PTR on error.
* Caller is responsible for freeing returned string.
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4dfba82..33d2213 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
- USER_KEY_LEN + strlen(sesInfo->userName) +
+ USER_KEY_LEN + strlen(sesInfo->user_name) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
dp = description + strlen(description);
- sprintf(dp, ";user=%s", sesInfo->userName);
+ sprintf(dp, ";user=%s", sesInfo->user_name);
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index fc0fd4f..23d43cd 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
case UNI_COLON:
*target = ':';
break;
- case UNI_ASTERIK:
+ case UNI_ASTERISK:
*target = '*';
break;
case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
* names are little endian 16 bit Unicode on the wire
*/
int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
const struct nls_table *cp, int mapChars)
{
int i, j, charlen;
- int len_remaining = maxlen;
char src_char;
- __u16 temp;
+ __le16 dst_char;
+ wchar_t tmp;
if (!mapChars)
return cifs_strtoUCS(target, source, PATH_MAX, cp);
- for (i = 0, j = 0; i < maxlen; j++) {
+ for (i = 0, j = 0; i < srclen; j++) {
src_char = source[i];
switch (src_char) {
case 0:
- put_unaligned_le16(0, &target[j]);
+ put_unaligned(0, &target[j]);
goto ctoUCS_out;
case ':':
- temp = UNI_COLON;
+ dst_char = cpu_to_le16(UNI_COLON);
break;
case '*':
- temp = UNI_ASTERIK;
+ dst_char = cpu_to_le16(UNI_ASTERISK);
break;
case '?':
- temp = UNI_QUESTION;
+ dst_char = cpu_to_le16(UNI_QUESTION);
break;
case '<':
- temp = UNI_LESSTHAN;
+ dst_char = cpu_to_le16(UNI_LESSTHAN);
break;
case '>':
- temp = UNI_GRTRTHAN;
+ dst_char = cpu_to_le16(UNI_GRTRTHAN);
break;
case '|':
- temp = UNI_PIPE;
+ dst_char = cpu_to_le16(UNI_PIPE);
break;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
* as they use backslash as separator.
*/
default:
- charlen = cp->char2uni(source+i, len_remaining,
- &temp);
+ charlen = cp->char2uni(source + i, srclen - i, &tmp);
+ dst_char = cpu_to_le16(tmp);
+
/*
* if no match, use question mark, which at least in
* some cases serves as wild card
*/
if (charlen < 1) {
- temp = 0x003f;
+ dst_char = cpu_to_le16(0x003f);
charlen = 1;
}
- len_remaining -= charlen;
/*
* character may take more than one byte in the source
* string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
i += charlen;
continue;
}
- put_unaligned_le16(temp, &target[j]);
+ put_unaligned(dst_char, &target[j]);
i++; /* move to next char in source string */
- len_remaining--;
}
ctoUCS_out:
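
For reference, the remapping in cifsConvertToUCS above stores each reserved ASCII character at a fixed offset into the Unicode private-use area; the UNI_* constants in cifs_unicode.h encode exactly that. A minimal standalone sketch of the mapping (illustration only, not the kernel function):

#include <stdint.h>

/* Map a reserved ASCII character to its 0xF000-offset code point,
 * mirroring the UNI_* constants; other characters pass through. */
static uint16_t remap_reserved(char c)
{
	switch (c) {
	case ':': case '*': case '?': case '<': case '>': case '|':
		return (uint16_t)c + 0xF000;
	default:
		return (uint16_t)(unsigned char)c;
	}
}
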
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 7fe6b52..644dd882 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -44,7 +44,7 @@
* reserved symbols (along with \ and /), otherwise illegal to store
* in filenames in NTFS
*/
-#define UNI_ASTERIK (__u16) ('*' + 0xF000)
+#define UNI_ASTERISK (__u16) ('*' + 0xF000)
#define UNI_QUESTION (__u16) ('?' + 0xF000)
#define UNI_COLON (__u16) (':' + 0xF000)
#define UNI_GRTRTHAN (__u16) ('>' + 0xF000)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a51585f..d1a016b 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -30,12 +30,13 @@
#include <linux/ctype.h>
#include <linux/random.h>
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
- sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
struct TCP_Server_Info *server, char *signature)
{
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
+ mutex_lock(&server->srv_mutex);
rc = cifs_calculate_signature(cifs_pdu, server,
what_we_think_sig_should_be);
+ mutex_unlock(&server->srv_mutex);
if (rc)
return rc;
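
The hunk above takes srv_mutex in the caller so that every signature computation runs under the same lock as the shared signing state (sequence number and crypto context). The shape of that pattern, as a userspace sketch with hypothetical names:

#include <pthread.h>
#include <stddef.h>
#include <string.h>

struct server {
	pthread_mutex_t srv_mutex;    /* serializes all signing state */
	unsigned int sequence_number; /* consumed by each signature */
};

/* Stand-in for the real MAC computation (assumption, not kernel code). */
static void compute_signature(struct server *srv, const void *pdu,
			      size_t len, unsigned char sig[16])
{
	(void)pdu; (void)len;
	memset(sig, 0, 16);
	sig[0] = (unsigned char)srv->sequence_number++;  /* toy MAC */
}

static void sign_pdu(struct server *srv, const void *pdu, size_t len,
		     unsigned char sig[16])
{
	pthread_mutex_lock(&srv->srv_mutex);
	compute_signature(srv, pdu, len, sig);
	pthread_mutex_unlock(&srv->srv_mutex);
}
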
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
return rc;
}
- /* convert ses->userName to unicode and uppercase */
- len = strlen(ses->userName);
+ /* convert ses->user_name to unicode and uppercase */
+ len = strlen(ses->user_name);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
rc = -ENOMEM;
goto calc_exit_2;
}
- len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+ len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
UniStrupr(user);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f297013..5c412b3 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,6 @@ int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
kfree(cifs_sb);
return rc;
}
+ cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
- else if (tcon->ses->userName)
- seq_printf(s, ",username=%s", tcon->ses->userName);
+ else if (tcon->ses->user_name)
+ seq_printf(s, ",username=%s", tcon->ses->user_name);
if (tcon->ses->domainName)
seq_printf(s, ",domain=%s", tcon->ses->domainName);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 17afb0f..a5d1106 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -37,10 +37,9 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
- termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
#define CIFS_MIN_RCV_POOL 4
@@ -92,7 +91,8 @@ enum statusEnum {
CifsNew = 0,
CifsGood,
CifsExiting,
- CifsNeedReconnect
+ CifsNeedReconnect,
+ CifsNeedNegotiate
};
enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
- char userName[MAX_USERNAME_SIZE + 1];
+ char *user_name;
char *domainName;
char *password;
struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
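
Replacing the fixed userName array with a heap-allocated user_name pointer means every consumer must tolerate NULL (null-user mounts) and the owner must free it exactly once, which the sesInfoFree() change further down handles. A userspace sketch of the same ownership pattern, with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct session {
	char *user_name;   /* heap-owned; NULL means a null-user mount */
};

static int session_set_user(struct session *ses, const char *username)
{
	if (!username)
		return 0;                  /* anonymous: leave it NULL */
	ses->user_name = strdup(username); /* kstrdup() in the kernel */
	return ses->user_name ? 0 : -1;
}

static void session_free(struct session *ses)
{
	free(ses->user_name);              /* free(NULL) is a no-op */
	ses->user_name = NULL;
}
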
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 904aa47..df959ba 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
*/
while (server->tcpStatus == CifsNeedReconnect) {
wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus == CifsGood), 10 * HZ);
+ (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
- /* is TCP session is reestablished now ?*/
+ /* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
return rc;
/* set up echo request */
- smb->hdr.Tid = cpu_to_le16(0xffff);
+ smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
put_unaligned_le16(1, &smb->EchoCount);
put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
__constant_cpu_to_le16(CIFS_WRLCK))
pLockData->fl_type = F_WRLCK;
- pLockData->fl_start = parm_data->start;
- pLockData->fl_end = parm_data->start +
- parm_data->length - 1;
- pLockData->fl_pid = parm_data->pid;
+ pLockData->fl_start = le64_to_cpu(parm_data->start);
+ pLockData->fl_end = pLockData->fl_start +
+ le64_to_cpu(parm_data->length) - 1;
+ pLockData->fl_pid = le32_to_cpu(parm_data->pid);
}
}
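
The lock-range hunk above converts each little-endian wire field with le64_to_cpu()/le32_to_cpu() before doing arithmetic on it. A portable sketch of the 64-bit decode, as an illustration:

#include <stdint.h>

/* Decode an unaligned little-endian 64-bit wire field into host
 * order; what le64_to_cpu() does after an unaligned load. */
static uint64_t le64_to_host(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}
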
@@ -5247,7 +5247,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
* Samba server ignores set of file size to zero due to bugs in some
* older clients, but we should be precise - we use SetFileSize to
* set file size and do not want to truncate file size to zero
- * accidently as happened on one Samba server beta by putting
+ * accidentally as happened on one Samba server beta by putting
* zero instead of -1 here
*/
data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8d6c17a..db9d55b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
spin_unlock(&GlobalMid_Lock);
- while ((server->tcpStatus != CifsExiting) &&
- (server->tcpStatus != CifsGood)) {
+ while (server->tcpStatus == CifsNeedReconnect) {
try_to_freeze();
/* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsGood;
+ server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
}
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
- remaining = total_data_size - data_in_this_rsp;
-
- if (remaining == 0)
+ if (total_data_size == data_in_this_rsp)
return 0;
- else if (remaining < 0) {
+ else if (total_data_size < data_in_this_rsp) {
cFYI(1, "total data %d smaller than data in frame %d",
total_data_size, data_in_this_rsp);
return -EINVAL;
- } else {
- cFYI(1, "missing %d bytes from transact2, check next response",
- remaining);
- if (total_data_size > maxBufSize) {
- cERROR(1, "TotalDataSize %d is over maximum buffer %d",
- total_data_size, maxBufSize);
- return -EINVAL;
- }
- return remaining;
}
+
+ remaining = total_data_size - data_in_this_rsp;
+
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
+ if (total_data_size > maxBufSize) {
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize);
+ return -EINVAL;
+ }
+ return remaining;
}
static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
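
The check2ndT2() rewrite above works because total_data_size and data_in_this_rsp are unsigned 16-bit values: their difference can never be negative, so the old "remaining < 0" branch was dead code. Comparing before subtracting makes the malformed-frame case explicit. A standalone sketch of the corrected check:

#include <stdint.h>

/* Returns 0 when complete, -1 when malformed, 1 with *missing set
 * when more transact2 data is expected. Mirrors the logic above. */
static int t2_bytes_missing(uint16_t total, uint16_t got,
			    unsigned int *missing)
{
	if (total == got)
		return 0;               /* response complete */
	if (total < got)
		return -1;              /* frame claims more than total */
	*missing = (unsigned int)(total - got);
	return 1;                       /* check the next response */
}
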
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
pdu_length = 4; /* enough to get RFC1001 header */
incomplete_rcv:
- if (echo_retries > 0 &&
+ if (echo_retries > 0 && server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
/* null user, ie anonymous, authentication */
vol->nullauth = 1;
}
- if (strnlen(value, 200) < 200) {
+ if (strnlen(value, MAX_USERNAME_SIZE) <
+ MAX_USERNAME_SIZE) {
vol->username = value;
} else {
printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
- unsigned short int port, *sport;
+ __be16 port, *sport;
switch (addr->sa_family) {
case AF_INET:
@@ -1572,7 +1572,7 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
return false;
}
- /* now check if signing mode is acceptible */
+ /* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
(server->secMode & SECMODE_SIGN_REQUIRED))
return false;
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
+ tcp_ses->tcpStatus = CifsNeedNegotiate;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
break;
default:
/* anything else takes username/password */
- if (strncmp(ses->userName, vol->username,
+ if (ses->user_name == NULL)
+ continue;
+ if (strncmp(ses->user_name, vol->username,
MAX_USERNAME_SIZE))
continue;
if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
+static bool warned_on_ntlm; /* globals init to false automatically */
+
static struct cifsSesInfo *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
- if (volume_info->username)
- strncpy(ses->userName, volume_info->username,
- MAX_USERNAME_SIZE);
+ if (volume_info->username) {
+ ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+ if (!ses->user_name)
+ goto get_ses_fail;
+ }
/* volume_info->password freed at unmount */
if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
}
ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
+
+ /* ntlmv2 is much stronger than ntlm security, and has been broadly
+ supported for many years, time to update default security mechanism */
+ if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+ warned_on_ntlm = true;
+ cERROR(1, "default security mechanism requested. The default "
+ "security mechanism will be upgraded from ntlm to "
+ "ntlmv2 in kernel release 2.6.41");
+ }
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
generic_ip_connect(struct TCP_Server_Info *server)
{
int rc = 0;
- unsigned short int sport;
+ __be16 sport;
int slen, sfamily;
struct socket *socket = server->ssocket;
struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
static int
ip_connect(struct TCP_Server_Info *server)
{
- unsigned short int *sport;
+ __be16 *sport;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
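
Typing the ports as __be16 records that they are kept in network byte order end to end, letting sparse flag any missing or doubled conversion. In userspace terms the boundary conversions look like this:

#include <arpa/inet.h>
#include <stdint.h>

/* Convert exactly once at the boundary; everything in between stays
 * in network byte order, matching the __be16 annotation. */
static uint16_t port_to_wire(uint16_t host_port)
{
	return htons(host_port);  /* value to store in sin_port */
}

static uint16_t port_from_wire(uint16_t wire_port)
{
	return ntohs(wire_port);  /* only for display or comparison */
}
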
@@ -2826,7 +2842,7 @@ try_mount_again:
remote_path_check:
/* check if a whole path (including prepath) is not remote */
- if (!rc && cifs_sb->prepathlen && tcon) {
+ if (!rc && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(cifs_sb, tcon);
if (full_path == NULL) {
@@ -2933,7 +2949,7 @@ mount_fail_check:
if (mount_data != mount_data_global)
kfree(mount_data);
/* If find_unc succeeded then rc == 0 so we can not end */
- /* up accidently freeing someone elses tcon struct */
+ /* up accidentally freeing someone else's tcon struct */
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd5f229..9ea65cf 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -189,7 +189,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
- negotation. EREMOTE indicates DFS junction, which is not
+ negotiation. EREMOTE indicates DFS junction, which is not
handled in posix open */
if (rc == 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e964b1c..faf5952 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -575,8 +575,10 @@ reopen_error_exit:
int cifs_close(struct inode *inode, struct file *file)
{
- cifsFileInfo_put(file->private_data);
- file->private_data = NULL;
+ if (file->private_data != NULL) {
+ cifsFileInfo_put(file->private_data);
+ file->private_data = NULL;
+ }
/* return code from the ->release op is always ignored */
return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
total_written += bytes_written) {
rc = -EAGAIN;
while (rc == -EAGAIN) {
+ struct kvec iov[2];
+ unsigned int len;
+
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
if (rc != 0)
break;
}
- if (experimEnabled || (pTcon->ses->server &&
- ((pTcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- == 0))) {
- struct kvec iov[2];
- unsigned int len;
-
- len = min((size_t)cifs_sb->wsize,
- write_size - total_written);
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = (char *)write_data +
- total_written;
- iov[1].iov_len = len;
- rc = CIFSSMBWrite2(xid, pTcon,
- open_file->netfid, len,
- *poffset, &bytes_written,
- iov, 1, 0);
- } else
- rc = CIFSSMBWrite(xid, pTcon,
- open_file->netfid,
- min_t(const int, cifs_sb->wsize,
- write_size - total_written),
- *poffset, &bytes_written,
- write_data + total_written,
- NULL, 0);
+
+ len = min((size_t)cifs_sb->wsize,
+ write_size - total_written);
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = (char *)write_data + total_written;
+ iov[1].iov_len = len;
+ rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+ *poffset, &bytes_written, iov, 1, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
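
With the experimental path removed, cifs_write() always builds the two-slot vector: slot 0 is reserved for the SMB header and slot 1 carries the next payload chunk, clamped to the negotiated write size. A userspace sketch of that setup, with hypothetical names:

#include <stddef.h>
#include <sys/uio.h>

static void fill_write_iov(struct iovec iov[2], void *hdr, size_t hdrlen,
			   const char *data, size_t total, size_t done,
			   size_t wsize)
{
	size_t len = total - done;

	if (len > wsize)
		len = wsize;            /* clamp to negotiated wsize */
	iov[0].iov_base = hdr;          /* slot 0: protocol header */
	iov[0].iov_len  = hdrlen;
	iov[1].iov_base = (void *)(data + done);
	iov[1].iov_len  = len;          /* slot 1: payload chunk */
}
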
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
}
tcon = tlink_tcon(open_file->tlink);
- if (!experimEnabled && tcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- cifsFileInfo_put(open_file);
- kfree(iov);
- return generic_writepages(mapping, wbc);
- }
cifsFileInfo_put(open_file);
xid = GetXid();
@@ -1569,34 +1551,6 @@ int cifs_fsync(struct file *file, int datasync)
return rc;
}
-/* static void cifs_sync_page(struct page *page)
-{
- struct address_space *mapping;
- struct inode *inode;
- unsigned long index = page->index;
- unsigned int rpages = 0;
- int rc = 0;
-
- cFYI(1, "sync page %p", page);
- mapping = page->mapping;
- if (!mapping)
- return 0;
- inode = mapping->host;
- if (!inode)
- return; */
-
-/* fill in rpages then
- result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
- if (rc < 0)
- return rc;
- return 0;
-#endif
-} */
-
/*
* As file closes, flush all cached write data for this inode checking
* for write behind errors.
@@ -2008,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return total_read;
}
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+
+ lock_page(page);
+ return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = cifs_page_mkwrite,
+};
+
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
@@ -2019,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
cifs_invalidate_mapping(inode);
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
@@ -2035,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
@@ -2510,7 +2486,6 @@ const struct address_space_operations cifs_addr_ops = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
@@ -2528,6 +2503,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e8804d3..ce417a9 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
if (rc != 0)
return rc;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, tcon, netfid);
/* it's not a symlink */
return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
if (rc != 0)
goto out;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, pTcon, netfid);
/* it's not a symlink */
goto out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2a930a7..0c684ae 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
memset(buf_to_free->password, 0, strlen(buf_to_free->password));
kfree(buf_to_free->password);
}
+ kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
- if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+ if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
pnotify = (struct file_notify_information *)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 1676570..f6728eb 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
bcc_ptr++;
} */
/* copy user */
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
} else {
- bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
- if (ses->userName == NULL) {
- /* BB what about null user mounts - check that we do this BB */
- } else {
- strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
- }
- bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+ if (ses->user_name != NULL)
+ strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+ /* else null user mount */
+
+ bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
- tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
- tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+ tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+ tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tilen) {
ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
tmp += len;
}
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
- len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+ len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
diff --git a/fs/coda/Makefile b/fs/coda/Makefile
index 6c22e61..1bab69a 100644
--- a/fs/coda/Makefile
+++ b/fs/coda/Makefile
@@ -9,4 +9,4 @@ coda-objs := psdev.o cache.o cnode.o inode.o dir.o file.o upcall.o \
# If you want debugging output, please uncomment the following line.
-# EXTRA_CFLAGS += -DDEBUG -DDEBUG_SMB_MALLOC=1
+# ccflags-y := -DDEBUG -DDEBUG_SMB_MALLOC=1
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index c6405ce..af56ad5 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -13,7 +13,6 @@
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fs_table_header;
-#endif
static ctl_table coda_table[] = {
{
@@ -40,7 +39,6 @@ static ctl_table coda_table[] = {
{}
};
-#ifdef CONFIG_SYSCTL
static ctl_table fs_table[] = {
{
.procname = "coda",
@@ -49,22 +47,27 @@ static ctl_table fs_table[] = {
},
{}
};
-#endif
void coda_sysctl_init(void)
{
-#ifdef CONFIG_SYSCTL
if ( !fs_table_header )
fs_table_header = register_sysctl_table(fs_table);
-#endif
}
void coda_sysctl_clean(void)
{
-#ifdef CONFIG_SYSCTL
if ( fs_table_header ) {
unregister_sysctl_table(fs_table_header);
fs_table_header = NULL;
}
-#endif
}
+
+#else
+void coda_sysctl_init(void)
+{
+}
+
+void coda_sysctl_clean(void)
+{
+}
+#endif
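
The coda change above inverts the #ifdef so that the real register/unregister bodies and the empty stubs are selected at compile time, and callers never need conditionals of their own. The same pattern in miniature, with a hypothetical config option:

/* Hypothetical CONFIG_EXAMPLE_SYSCTL: real bodies under the option,
 * empty stubs otherwise, so call sites stay unconditional. */
#ifdef CONFIG_EXAMPLE_SYSCTL
void example_sysctl_init(void)  { /* register tables here */ }
void example_sysctl_clean(void) { /* unregister tables here */ }
#else
void example_sysctl_init(void)  { }
void example_sysctl_clean(void) { }
#endif
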
diff --git a/fs/compat.c b/fs/compat.c
index c6d31a3..72fe6cd 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1671,9 +1671,6 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
* Update: ERESTARTSYS breaks at least the xview clock binary, so
* I'm trying ERESTARTNOHAND which restart only when you want to.
*/
-#define MAX_SELECT_SECONDS \
- ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-
int compat_core_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
struct timespec *end_time)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 90ff3cb..3313dd1 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -990,7 +990,7 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
* This describes these functions and their helpers.
*
* Allow another kernel system to depend on a config_item. If this
- * happens, the item cannot go away until the dependant can live without
+ * happens, the item cannot go away until the dependent can live without
* it. The idea is to give client modules as simple an interface as
* possible. When a system asks them to depend on an item, they just
* call configfs_depend_item(). If the item is live and the client
diff --git a/fs/dcache.c b/fs/dcache.c
index ad25c4c..22a0ef4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -99,12 +99,9 @@ static struct kmem_cache *dentry_cache __read_mostly;
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
-struct dcache_hash_bucket {
- struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
unsigned long hash)
{
hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
@@ -112,16 +109,6 @@ static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
return dentry_hashtable + (hash & D_HASHMASK);
}
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
-{
- bit_spin_lock(0, (unsigned long *)&b->head.first);
-}
-
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
-{
- __bit_spin_unlock(0, (unsigned long *)&b->head.first);
-}
-
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
.age_limit = 45,
@@ -167,8 +154,8 @@ static void d_free(struct dentry *dentry)
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- /* if dentry was never inserted into hash, immediate free is OK */
- if (hlist_bl_unhashed(&dentry->d_hash))
+ /* if dentry was never visible to RCU, immediate free is OK */
+ if (!(dentry->d_flags & DCACHE_RCUACCESS))
__d_free(&dentry->d_u.d_rcu);
else
call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -330,28 +317,19 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
*/
void __d_drop(struct dentry *dentry)
{
- if (!(dentry->d_flags & DCACHE_UNHASHED)) {
- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
- bit_spin_lock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
- dentry->d_flags |= DCACHE_UNHASHED;
- hlist_bl_del_init(&dentry->d_hash);
- __bit_spin_unlock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
- } else {
- struct dcache_hash_bucket *b;
+ if (!d_unhashed(dentry)) {
+ struct hlist_bl_head *b;
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ b = &dentry->d_sb->s_anon;
+ else
b = d_hash(dentry->d_parent, dentry->d_name.hash);
- spin_lock_bucket(b);
- /*
- * We may not actually need to put DCACHE_UNHASHED
- * manipulations under the hash lock, but follow
- * the principle of least surprise.
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- hlist_bl_del_rcu(&dentry->d_hash);
- spin_unlock_bucket(b);
- dentry_rcuwalk_barrier(dentry);
- }
+
+ hlist_bl_lock(b);
+ __hlist_bl_del(&dentry->d_hash);
+ dentry->d_hash.pprev = NULL;
+ hlist_bl_unlock(b);
+
+ dentry_rcuwalk_barrier(dentry);
}
}
EXPORT_SYMBOL(__d_drop);
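
The dcache conversion relies on hlist_bl heads, which overload bit 0 of the first-node pointer as a spinlock; hlist_bl_lock()/hlist_bl_unlock() replace the open-coded bit_spin_lock() calls deleted above. A toy userspace bit-spinlock over a pointer-sized word (busy-waiting sketch, not the kernel primitive):

#include <stdatomic.h>
#include <stdint.h>

static void bit_lock(_Atomic uintptr_t *word)
{
	uintptr_t old;

	do {    /* expect the lock bit clear, try to set it */
		old = atomic_load(word) & ~(uintptr_t)1;
	} while (!atomic_compare_exchange_weak(word, &old, old | 1));
}

static void bit_unlock(_Atomic uintptr_t *word)
{
	atomic_fetch_and(word, ~(uintptr_t)1);  /* clear the lock bit */
}
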
@@ -1304,7 +1282,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
dname[name->len] = 0;
dentry->d_count = 1;
- dentry->d_flags = DCACHE_UNHASHED;
+ dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
dentry->d_inode = NULL;
@@ -1606,10 +1584,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
tmp->d_inode = inode;
tmp->d_flags |= DCACHE_DISCONNECTED;
list_add(&tmp->d_alias, &inode->i_dentry);
- bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
- tmp->d_flags &= ~DCACHE_UNHASHED;
+ hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
- __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+ hlist_bl_unlock(&tmp->d_sb->s_anon);
spin_unlock(&tmp->d_lock);
spin_unlock(&inode->i_lock);
security_d_instantiate(tmp, inode);
@@ -1789,7 +1766,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -1813,7 +1790,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
*
* See Documentation/filesystems/path-lookup.txt for more details.
*/
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
struct inode *i;
const char *tname;
int tlen;
@@ -1908,7 +1885,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *found = NULL;
struct dentry *dentry;
@@ -1935,7 +1912,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
*/
rcu_read_lock();
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
const char *tname;
int tlen;
@@ -2086,13 +2063,13 @@ again:
}
EXPORT_SYMBOL(d_delete);
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
- spin_lock_bucket(b);
- entry->d_flags &= ~DCACHE_UNHASHED;
- hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
- spin_unlock_bucket(b);
+ hlist_bl_lock(b);
+ entry->d_flags |= DCACHE_RCUACCESS;
+ hlist_bl_add_head_rcu(&entry->d_hash, b);
+ hlist_bl_unlock(b);
}
static void _d_rehash(struct dentry * entry)
@@ -2131,7 +2108,7 @@ EXPORT_SYMBOL(d_rehash);
*/
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
- BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+ BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
spin_lock(&dentry->d_lock);
@@ -3025,7 +3002,7 @@ static void __init dcache_init_early(void)
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
HASH_EARLY,
@@ -3034,7 +3011,7 @@ static void __init dcache_init_early(void)
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
@@ -3057,7 +3034,7 @@ static void __init dcache_init(void)
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
0,
@@ -3066,7 +3043,7 @@ static void __init dcache_init(void)
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 1bb547c..2f27e57 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -479,6 +479,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
struct dentry *root = sb->s_root;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
+ int ret = 0;
char s[12];
/* We're supposed to be given the slave end of a pty */
@@ -501,14 +502,17 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, s);
- if (!IS_ERR(dentry)) {
+ if (dentry) {
d_add(dentry, inode);
fsnotify_create(root->d_inode, dentry);
+ } else {
+ iput(inode);
+ ret = -ENOMEM;
}
mutex_unlock(&root->d_inode->i_mutex);
- return 0;
+ return ret;
}
struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number)
@@ -544,17 +548,12 @@ void devpts_pty_kill(struct tty_struct *tty)
mutex_lock(&root->d_inode->i_mutex);
dentry = d_find_alias(inode);
- if (IS_ERR(dentry))
- goto out;
-
- if (dentry) {
- inode->i_nlink--;
- d_delete(dentry);
- dput(dentry); /* d_alloc_name() in devpts_pty_new() */
- }
+ inode->i_nlink--;
+ d_delete(dentry);
+ dput(dentry); /* d_alloc_name() in devpts_pty_new() */
dput(dentry); /* d_find_alias above */
-out:
+
mutex_unlock(&root->d_inode->i_mutex);
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dcb5577..ac5f164 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED;
- if (ret != -EIOCBQUEUED) {
- /* All IO is now issued, send it on its way */
- blk_run_address_space(inode->i_mapping);
+ if (ret != -EIOCBQUEUED)
dio_await_completion(dio);
- }
/*
* Sync will always be dropping the final ref and completing the
@@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct dio *dio;
if (rw & WRITE)
- rw = WRITE_ODIRECT_PLUG;
+ rw = WRITE_ODIRECT;
if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 04b8c44..56d6bfc 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -519,7 +519,7 @@ static void toss_rsb(struct kref *kref)
}
}
-/* When all references to the rsb are gone it's transfered to
+/* When all references to the rsb are gone it's transferred to
the tossed list for later disposal. */
static void put_rsb(struct dlm_rsb *r)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index bffa1e7..5e2c71f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -810,7 +810,7 @@ static int tcp_accept_from_sock(struct connection *con)
/*
* Add it to the active queue in case we got data
- * beween processing the accept adding the socket
+ * between processing the accept adding the socket
* to the read_sockets list
*/
if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index eda43f3..1463823 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -304,7 +304,7 @@ static void set_master_lkbs(struct dlm_rsb *r)
}
/*
- * Propogate the new master nodeid to locks
+ * Propagate the new master nodeid to locks
* The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
* The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
* rsb's to consider.
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 2195c21..98b77c8 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -8,6 +8,7 @@
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
+#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
@@ -16,20 +17,23 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
struct inode *inode, *toput_inode = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
- continue;
- if (inode->i_mapping->nrpages == 0)
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ (inode->i_mapping->nrpages == 0)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(toput_inode);
}
@@ -45,7 +49,11 @@ static void drop_slab(void)
int drop_caches_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, buffer, length, ppos);
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+ if (ret)
+ return ret;
if (write) {
if (sysctl_drop_caches & 1)
iterate_supers(drop_pagecache_sb, NULL);
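
The handler fix above returns the proc_dointvec_minmax() error instead of acting on a value that never parsed. The same validate-then-act shape in a standalone sketch:

#include <errno.h>
#include <stdlib.h>

/* Parse and range-check the input fully before acting on it; on a
 * parse failure the error is propagated unchanged. Sketch only. */
static int handle_drop_caches(const char *buf, int write, int *mode)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(buf, &end, 10);
	if (errno || end == buf || v < 1 || v > 3)
		return -EINVAL;         /* propagate, don't act */
	*mode = (int)v;
	if (write) {
		/* drop page cache (1), slab (2), or both (3) */
	}
	return 0;
}
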
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index bfd8b68..b8d5c80 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -266,7 +266,6 @@ void ecryptfs_destroy_mount_crypt_stat(
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
list_del(&auth_tok->mount_crypt_stat_list);
- mount_crypt_stat->num_global_auth_toks--;
if (auth_tok->global_auth_tok_key
&& !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
key_put(auth_tok->global_auth_tok_key);
@@ -1389,6 +1388,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = -ENOMEM;
goto out;
}
+ /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */
rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
ecryptfs_dentry);
if (unlikely(rc)) {
@@ -1452,6 +1452,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ struct ecryptfs_crypt_stat *crypt_stat;
+ u64 file_size;
+
+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+ mount_crypt_stat =
+ &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
+ if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
+ file_size = i_size_read(ecryptfs_inode_to_lower(inode));
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
+ file_size += crypt_stat->metadata_size;
+ } else
+ file_size = get_unaligned_be64(page_virt);
+ i_size_write(inode, (loff_t)file_size);
+ crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
+}
+
/**
* ecryptfs_read_headers_virt
* @page_virt: The virtual address into which to read the headers
@@ -1482,6 +1501,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
rc = -EINVAL;
goto out;
}
+ if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
&bytes_read);
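
ecryptfs_i_size_init() reads the plaintext size that the header stores as an unaligned big-endian 64-bit value at the start of page_virt (the get_unaligned_be64() call above). A portable sketch of that decode:

#include <stdint.h>

/* Decode an unaligned big-endian 64-bit header field, as
 * get_unaligned_be64() does. */
static uint64_t read_be64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}
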
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index e007534..e702827 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -233,7 +233,7 @@ ecryptfs_get_key_payload_data(struct key *key)
struct ecryptfs_key_sig {
struct list_head crypt_stat_list;
- char keysig[ECRYPTFS_SIG_SIZE_HEX];
+ char keysig[ECRYPTFS_SIG_SIZE_HEX + 1];
};
struct ecryptfs_filename {
@@ -257,19 +257,19 @@ struct ecryptfs_filename {
struct ecryptfs_crypt_stat {
#define ECRYPTFS_STRUCT_INITIALIZED 0x00000001
#define ECRYPTFS_POLICY_APPLIED 0x00000002
-#define ECRYPTFS_NEW_FILE 0x00000004
-#define ECRYPTFS_ENCRYPTED 0x00000008
-#define ECRYPTFS_SECURITY_WARNING 0x00000010
-#define ECRYPTFS_ENABLE_HMAC 0x00000020
-#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000040
-#define ECRYPTFS_KEY_VALID 0x00000080
-#define ECRYPTFS_METADATA_IN_XATTR 0x00000100
-#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200
-#define ECRYPTFS_KEY_SET 0x00000400
-#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000800
-#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
-#define ECRYPTFS_ENCFN_USE_FEK 0x00002000
-#define ECRYPTFS_UNLINK_SIGS 0x00004000
+#define ECRYPTFS_ENCRYPTED 0x00000004
+#define ECRYPTFS_SECURITY_WARNING 0x00000008
+#define ECRYPTFS_ENABLE_HMAC 0x00000010
+#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000020
+#define ECRYPTFS_KEY_VALID 0x00000040
+#define ECRYPTFS_METADATA_IN_XATTR 0x00000080
+#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000100
+#define ECRYPTFS_KEY_SET 0x00000200
+#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000400
+#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800
+#define ECRYPTFS_ENCFN_USE_FEK 0x00001000
+#define ECRYPTFS_UNLINK_SIGS 0x00002000
+#define ECRYPTFS_I_SIZE_INITIALIZED 0x00004000
u32 flags;
unsigned int file_version;
size_t iv_bytes;
@@ -296,8 +296,9 @@ struct ecryptfs_crypt_stat {
struct ecryptfs_inode_info {
struct inode vfs_inode;
struct inode *wii_inode;
- struct file *lower_file;
struct mutex lower_file_mutex;
+ atomic_t lower_file_count;
+ struct file *lower_file;
struct ecryptfs_crypt_stat crypt_stat;
};
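
Moving from an always-open lower_file to a counted one means the first user opens it and the last user closes it, with lower_file_mutex serializing the 0->1 and 1->0 transitions. A userspace sketch of the same get/put pattern (hypothetical names; a plain counter under the mutex stands in for the atomic_t):

#include <pthread.h>
#include <stdio.h>

struct inode_info {
	pthread_mutex_t lower_file_mutex;
	int lower_file_count;
	FILE *lower_file;
};

static int get_lower_file(struct inode_info *i, const char *path)
{
	int rc = 0;

	pthread_mutex_lock(&i->lower_file_mutex);
	if (i->lower_file_count++ == 0) {       /* first user opens */
		i->lower_file = fopen(path, "r+");
		if (!i->lower_file) {
			i->lower_file_count = 0;
			rc = -1;
		}
	}
	pthread_mutex_unlock(&i->lower_file_mutex);
	return rc;
}

static void put_lower_file(struct inode_info *i)
{
	pthread_mutex_lock(&i->lower_file_mutex);
	if (--i->lower_file_count == 0) {       /* last user closes */
		fclose(i->lower_file);
		i->lower_file = NULL;
	}
	pthread_mutex_unlock(&i->lower_file_mutex);
}
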
@@ -333,7 +334,6 @@ struct ecryptfs_global_auth_tok {
u32 flags;
struct list_head mount_crypt_stat_list;
struct key *global_auth_tok_key;
- struct ecryptfs_auth_tok *global_auth_tok;
unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1];
};
@@ -380,7 +380,6 @@ struct ecryptfs_mount_crypt_stat {
u32 flags;
struct list_head global_auth_tok_list;
struct mutex global_auth_tok_list_mutex;
- size_t num_global_auth_toks;
size_t global_default_cipher_key_size;
size_t global_default_fn_cipher_key_bytes;
unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE
@@ -630,6 +629,7 @@ struct ecryptfs_open_req {
int ecryptfs_interpose(struct dentry *hidden_dentry,
struct dentry *this_dentry, struct super_block *sb,
u32 flags);
+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dentry,
struct inode *ecryptfs_dir_inode);
@@ -761,7 +761,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
struct dentry *lower_dentry,
struct vfsmount *lower_mnt,
const struct cred *cred);
-int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry);
+int ecryptfs_get_lower_file(struct dentry *ecryptfs_dentry);
+void ecryptfs_put_lower_file(struct inode *inode);
int
ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
size_t *packet_size,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 7d1050e..566e547 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -191,10 +191,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out_free;
@@ -202,9 +202,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
== O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) {
rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ printk(KERN_WARNING "%s: Lower file is RO; eCryptfs "
"file must hence be opened RO\n", __func__);
- goto out_free;
+ goto out_put;
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -232,10 +232,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
"Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
mutex_unlock(&crypt_stat->cs_mutex);
- goto out_free;
+ goto out_put;
}
rc = 0;
- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+ | ECRYPTFS_ENCRYPTED);
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
@@ -245,6 +246,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
(unsigned long long)i_size_read(inode));
goto out;
+out_put:
+ ecryptfs_put_lower_file(inode);
out_free:
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
@@ -254,17 +257,13 @@ out:
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
- int rc = 0;
- struct file *lower_file = NULL;
-
- lower_file = ecryptfs_file_to_lower(file);
- if (lower_file->f_op && lower_file->f_op->flush)
- rc = lower_file->f_op->flush(lower_file, td);
- return rc;
+ return file->f_mode & FMODE_WRITE
+ ? filemap_write_and_wait(file->f_mapping) : 0;
}
static int ecryptfs_release(struct inode *inode, struct file *file)
{
+ ecryptfs_put_lower_file(inode);
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
return 0;
@@ -273,7 +272,14 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, int datasync)
{
- return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+ int rc = 0;
+
+ rc = generic_file_fsync(file, datasync);
+ if (rc)
+ goto out;
+ rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+out:
+ return rc;
}
static int ecryptfs_fasync(int fd, struct file *file, int flag)
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index b592938..4d4cc6a 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -143,26 +143,6 @@ out:
}
/**
- * grow_file
- * @ecryptfs_dentry: the eCryptfs dentry
- *
- * This is the code which will grow the file to its correct size.
- */
-static int grow_file(struct dentry *ecryptfs_dentry)
-{
- struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
- char zero_virt[] = { 0x00 };
- int rc = 0;
-
- rc = ecryptfs_write(ecryptfs_inode, zero_virt, 0, 1);
- i_size_write(ecryptfs_inode, 0);
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat.flags |=
- ECRYPTFS_NEW_FILE;
- return rc;
-}
-
-/**
* ecryptfs_initialize_file
*
* Cause the file to be changed from a basic empty file to an ecryptfs
@@ -181,7 +161,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
- crypt_stat->flags |= ECRYPTFS_NEW_FILE;
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
rc = ecryptfs_new_file_context(ecryptfs_dentry);
if (rc) {
@@ -189,22 +168,18 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
"context; rc = [%d]\n", rc);
goto out;
}
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out;
}
rc = ecryptfs_write_metadata(ecryptfs_dentry);
- if (rc) {
- printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
- goto out;
- }
- rc = grow_file(ecryptfs_dentry);
if (rc)
- printk(KERN_ERR "Error growing file; rc = [%d]\n", rc);
+ printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
+ ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
out:
return rc;
}
@@ -250,11 +225,9 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dir_dentry;
struct vfsmount *lower_mnt;
struct inode *lower_inode;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_crypt_stat *crypt_stat;
char *page_virt = NULL;
- u64 file_size;
- int rc = 0;
+ int put_lower = 0, rc = 0;
lower_dir_dentry = lower_dentry->d_parent;
lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
@@ -301,14 +274,15 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
rc = -ENOMEM;
goto out;
}
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out_free_kmem;
}
+ put_lower = 1;
crypt_stat = &ecryptfs_inode_to_private(
ecryptfs_dentry->d_inode)->crypt_stat;
/* TODO: lock for crypt_stat comparison */
@@ -326,18 +300,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
}
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
- if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
- if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- file_size = (crypt_stat->metadata_size
- + i_size_read(lower_dentry->d_inode));
- else
- file_size = i_size_read(lower_dentry->d_inode);
- } else {
- file_size = get_unaligned_be64(page_virt);
- }
- i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
out_free_kmem:
kmem_cache_free(ecryptfs_header_cache_2, page_virt);
goto out;
@@ -346,6 +309,8 @@ out_put:
mntput(lower_mnt);
d_drop(ecryptfs_dentry);
out:
+ if (put_lower)
+ ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
return rc;
}
@@ -562,8 +527,6 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
dget(lower_dentry);
rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
dput(lower_dentry);
- if (!rc)
- d_delete(lower_dentry);
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
unlock_dir(lower_dir_dentry);
@@ -634,8 +597,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
out_lock:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dput(lower_new_dentry->d_parent);
- dput(lower_old_dentry->d_parent);
+ dput(lower_new_dir_dentry);
+ dput(lower_old_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
return rc;
@@ -783,8 +746,11 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
if (unlikely((ia->ia_size == i_size))) {
lower_ia->ia_valid &= ~ATTR_SIZE;
- goto out;
+ return 0;
}
+ rc = ecryptfs_get_lower_file(dentry);
+ if (rc)
+ return rc;
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
@@ -862,6 +828,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
lower_ia->ia_valid &= ~ATTR_SIZE;
}
out:
+ ecryptfs_put_lower_file(inode);
return rc;
}
@@ -937,7 +904,13 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
+ rc = ecryptfs_get_lower_file(dentry);
+ if (rc) {
+ mutex_unlock(&crypt_stat->cs_mutex);
+ goto out;
+ }
rc = ecryptfs_read_metadata(dentry);
+ ecryptfs_put_lower_file(inode);
if (rc) {
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
@@ -951,10 +924,17 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
goto out;
}
rc = 0;
- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+ | ECRYPTFS_ENCRYPTED);
}
}
mutex_unlock(&crypt_stat->cs_mutex);
+ if (S_ISREG(inode->i_mode)) {
+ rc = filemap_write_and_wait(inode->i_mapping);
+ if (rc)
+ goto out;
+ fsstack_copy_attr_all(inode, lower_inode);
+ }
memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c1436cf..03e609c 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -65,6 +65,24 @@ static int process_request_key_err(long err_code)
return rc;
}
+static int process_find_global_auth_tok_for_sig_err(int err_code)
+{
+ int rc = err_code;
+
+ switch (err_code) {
+ case -ENOENT:
+ ecryptfs_printk(KERN_WARNING, "Missing auth tok\n");
+ break;
+ case -EINVAL:
+ ecryptfs_printk(KERN_WARNING, "Invalid auth tok\n");
+ break;
+ default:
+ rc = process_request_key_err(err_code);
+ break;
+ }
+ return rc;
+}
+
/**
* ecryptfs_parse_packet_length
* @data: Pointer to memory containing length at offset
@@ -403,27 +421,120 @@ out:
return rc;
}
+/**
+ * ecryptfs_verify_version
+ * @version: The version number to confirm
+ *
+ * Returns zero on good version; non-zero otherwise
+ */
+static int ecryptfs_verify_version(u16 version)
+{
+ int rc = 0;
+ unsigned char major;
+ unsigned char minor;
+
+ major = ((version >> 8) & 0xFF);
+ minor = (version & 0xFF);
+ if (major != ECRYPTFS_VERSION_MAJOR) {
+ ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
+ "Expected [%d]; got [%d]\n",
+ ECRYPTFS_VERSION_MAJOR, major);
+ rc = -EINVAL;
+ goto out;
+ }
+ if (minor != ECRYPTFS_VERSION_MINOR) {
+ ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
+ "Expected [%d]; got [%d]\n",
+ ECRYPTFS_VERSION_MINOR, minor);
+ rc = -EINVAL;
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/**
+ * ecryptfs_verify_auth_tok_from_key
+ * @auth_tok_key: key containing the authentication token
+ * @auth_tok: authentication token
+ *
+ * Returns zero on valid auth tok; -EINVAL otherwise
+ */
+static int
+ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
+ struct ecryptfs_auth_tok **auth_tok)
+{
+ int rc = 0;
+
+ (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (ecryptfs_verify_version((*auth_tok)->version)) {
+ printk(KERN_ERR "Data structure version mismatch. Userspace "
+ "tools must match eCryptfs kernel module with major "
+ "version [%d] and minor version [%d]\n",
+ ECRYPTFS_VERSION_MAJOR, ECRYPTFS_VERSION_MINOR);
+ rc = -EINVAL;
+ goto out;
+ }
+ if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
+ && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
+ printk(KERN_ERR "Invalid auth_tok structure "
+ "returned from key query\n");
+ rc = -EINVAL;
+ goto out;
+ }
+out:
+ return rc;
+}
+
static int
ecryptfs_find_global_auth_tok_for_sig(
- struct ecryptfs_global_auth_tok **global_auth_tok,
+ struct key **auth_tok_key,
+ struct ecryptfs_auth_tok **auth_tok,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig)
{
struct ecryptfs_global_auth_tok *walker;
int rc = 0;
- (*global_auth_tok) = NULL;
+ (*auth_tok_key) = NULL;
+ (*auth_tok) = NULL;
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry(walker,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
- if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX) == 0) {
- rc = key_validate(walker->global_auth_tok_key);
- if (!rc)
- (*global_auth_tok) = walker;
+ if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX))
+ continue;
+
+ if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) {
+ rc = -EINVAL;
goto out;
}
+
+ rc = key_validate(walker->global_auth_tok_key);
+ if (rc) {
+ if (rc == -EKEYEXPIRED)
+ goto out;
+ goto out_invalid_auth_tok;
+ }
+
+ down_write(&(walker->global_auth_tok_key->sem));
+ rc = ecryptfs_verify_auth_tok_from_key(
+ walker->global_auth_tok_key, auth_tok);
+ if (rc)
+ goto out_invalid_auth_tok_unlock;
+
+ (*auth_tok_key) = walker->global_auth_tok_key;
+ key_get(*auth_tok_key);
+ goto out;
}
- rc = -EINVAL;
+ rc = -ENOENT;
+ goto out;
+out_invalid_auth_tok_unlock:
+ up_write(&(walker->global_auth_tok_key->sem));
+out_invalid_auth_tok:
+ printk(KERN_WARNING "Invalidating auth tok with sig = [%s]\n", sig);
+ walker->flags |= ECRYPTFS_AUTH_TOK_INVALID;
+ key_put(walker->global_auth_tok_key);
+ walker->global_auth_tok_key = NULL;
out:
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
return rc;
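
ecryptfs_verify_version() above unpacks a u16 that carries the major version in the high byte and the minor in the low byte. The packing and the matching check, as a small sketch:

#include <stdint.h>

static uint16_t pack_version(uint8_t major, uint8_t minor)
{
	return (uint16_t)((major << 8) | minor);
}

static int version_matches(uint16_t version, uint8_t major, uint8_t minor)
{
	return ((version >> 8) & 0xFF) == major &&
	       (version & 0xFF) == minor;
}
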
@@ -451,14 +562,11 @@ ecryptfs_find_auth_tok_for_sig(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *sig)
{
- struct ecryptfs_global_auth_tok *global_auth_tok;
int rc = 0;
- (*auth_tok_key) = NULL;
- (*auth_tok) = NULL;
- if (ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok,
- mount_crypt_stat, sig)) {
-
+ rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok,
+ mount_crypt_stat, sig);
+ if (rc == -ENOENT) {
/* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the
* mount_crypt_stat structure, we prevent to use auth toks that
* are not inserted through the ecryptfs_add_global_auth_tok
@@ -470,8 +578,7 @@ ecryptfs_find_auth_tok_for_sig(
rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok,
sig);
- } else
- (*auth_tok) = global_auth_tok->global_auth_tok;
+ }
return rc;
}
@@ -531,6 +638,16 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
}
s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
(*packet_size) = 0;
+ rc = ecryptfs_find_auth_tok_for_sig(
+ &auth_tok_key,
+ &s->auth_tok, mount_crypt_stat,
+ mount_crypt_stat->global_default_fnek_sig);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to find auth tok for "
+ "fnek sig [%s]; rc = [%d]\n", __func__,
+ mount_crypt_stat->global_default_fnek_sig, rc);
+ goto out;
+ }
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
&s->desc.tfm,
&s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
@@ -616,16 +733,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
goto out_free_unlock;
}
dest[s->i++] = s->cipher_code;
- rc = ecryptfs_find_auth_tok_for_sig(
- &auth_tok_key,
- &s->auth_tok, mount_crypt_stat,
- mount_crypt_stat->global_default_fnek_sig);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to find auth tok for "
- "fnek sig [%s]; rc = [%d]\n", __func__,
- mount_crypt_stat->global_default_fnek_sig, rc);
- goto out_free_unlock;
- }
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -765,8 +872,10 @@ out_free_unlock:
out_unlock:
mutex_unlock(s->tfm_mutex);
out:
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
kfree(s);
return rc;
}
@@ -879,6 +988,15 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
__func__, s->cipher_code);
goto out;
}
+ rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
+ &s->auth_tok, mount_crypt_stat,
+ s->fnek_sig_hex);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to find auth tok for "
+ "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
+ rc);
+ goto out;
+ }
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm,
&s->tfm_mutex,
s->cipher_string);
@@ -925,15 +1043,6 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
* >= ECRYPTFS_MAX_IV_BYTES. */
memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
s->desc.info = s->iv;
- rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
- &s->auth_tok, mount_crypt_stat,
- s->fnek_sig_hex);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to find auth tok for "
- "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
- rc);
- goto out_free_unlock;
- }
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -1002,8 +1111,10 @@ out:
(*filename_size) = 0;
(*filename) = NULL;
}
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
kfree(s);
return rc;
}
@@ -1520,38 +1631,6 @@ out:
return rc;
}
-/**
- * ecryptfs_verify_version
- * @version: The version number to confirm
- *
- * Returns zero on good version; non-zero otherwise
- */
-static int ecryptfs_verify_version(u16 version)
-{
- int rc = 0;
- unsigned char major;
- unsigned char minor;
-
- major = ((version >> 8) & 0xFF);
- minor = (version & 0xFF);
- if (major != ECRYPTFS_VERSION_MAJOR) {
- ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
- "Expected [%d]; got [%d]\n",
- ECRYPTFS_VERSION_MAJOR, major);
- rc = -EINVAL;
- goto out;
- }
- if (minor != ECRYPTFS_VERSION_MINOR) {
- ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
- "Expected [%d]; got [%d]\n",
- ECRYPTFS_VERSION_MINOR, minor);
- rc = -EINVAL;
- goto out;
- }
-out:
- return rc;
-}
-
int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
char *sig)
@@ -1563,31 +1642,16 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
printk(KERN_ERR "Could not find key with description: [%s]\n",
sig);
rc = process_request_key_err(PTR_ERR(*auth_tok_key));
+ (*auth_tok_key) = NULL;
goto out;
}
- (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key);
- if (ecryptfs_verify_version((*auth_tok)->version)) {
- printk(KERN_ERR
- "Data structure version mismatch. "
- "Userspace tools must match eCryptfs "
- "kernel module with major version [%d] "
- "and minor version [%d]\n",
- ECRYPTFS_VERSION_MAJOR,
- ECRYPTFS_VERSION_MINOR);
- rc = -EINVAL;
- goto out_release_key;
- }
- if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
- && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
- printk(KERN_ERR "Invalid auth_tok structure "
- "returned from key query\n");
- rc = -EINVAL;
- goto out_release_key;
- }
-out_release_key:
+ down_write(&(*auth_tok_key)->sem);
+ rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok);
if (rc) {
+ up_write(&(*auth_tok_key)->sem);
key_put(*auth_tok_key);
(*auth_tok_key) = NULL;
+ goto out;
}
out:
return rc;
@@ -1809,6 +1873,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
find_next_matching_auth_tok:
found_auth_tok = 0;
if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
auth_tok_key = NULL;
}
@@ -1895,8 +1960,10 @@ found_matching_auth_tok:
out_wipe_list:
wipe_auth_tok_list(&auth_tok_list);
out:
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
return rc;
}
@@ -2324,7 +2391,7 @@ ecryptfs_generate_key_packet_set(char *dest_base,
size_t max)
{
struct ecryptfs_auth_tok *auth_tok;
- struct ecryptfs_global_auth_tok *global_auth_tok;
+ struct key *auth_tok_key = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
@@ -2343,21 +2410,16 @@ ecryptfs_generate_key_packet_set(char *dest_base,
list_for_each_entry(key_sig, &crypt_stat->keysig_list,
crypt_stat_list) {
memset(key_rec, 0, sizeof(*key_rec));
- rc = ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok,
+ rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key,
+ &auth_tok,
mount_crypt_stat,
key_sig->keysig);
if (rc) {
- printk(KERN_ERR "Error attempting to get the global "
- "auth_tok; rc = [%d]\n", rc);
+ printk(KERN_WARNING "Unable to retrieve auth tok with "
+ "sig = [%s]\n", key_sig->keysig);
+ rc = process_find_global_auth_tok_for_sig_err(rc);
goto out_free;
}
- if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID) {
- printk(KERN_WARNING
- "Skipping invalid auth tok with sig = [%s]\n",
- global_auth_tok->sig);
- continue;
- }
- auth_tok = global_auth_tok->global_auth_tok;
if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
rc = write_tag_3_packet((dest_base + (*len)),
&max, auth_tok,
@@ -2395,6 +2457,9 @@ ecryptfs_generate_key_packet_set(char *dest_base,
rc = -EINVAL;
goto out_free;
}
+ up_write(&(auth_tok_key->sem));
+ key_put(auth_tok_key);
+ auth_tok_key = NULL;
}
if (likely(max > 0)) {
dest_base[(*len)] = 0x00;
@@ -2407,6 +2472,11 @@ out_free:
out:
if (rc)
(*len) = 0;
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
+ key_put(auth_tok_key);
+ }
+
mutex_unlock(&crypt_stat->keysig_list_mutex);
return rc;
}
@@ -2424,6 +2494,7 @@ int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig)
return -ENOMEM;
}
memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX);
+ new_key_sig->keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
/* Caller must hold keysig_list_mutex */
list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list);
@@ -2453,7 +2524,6 @@ ecryptfs_add_global_auth_tok(struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_add(&new_auth_tok->mount_crypt_stat_list,
&mount_crypt_stat->global_auth_tok_list);
- mount_crypt_stat->num_global_auth_toks++;
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
out:
return rc;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 0851ab6..69f994a 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -44,7 +44,7 @@ static struct task_struct *ecryptfs_kthread;
* @ignored: ignored
*
* The eCryptfs kernel thread that has the responsibility of getting
- * the lower persistent file with RW permissions.
+ * the lower file with RW permissions.
*
* Returns zero on success; non-zero otherwise
*/
@@ -141,8 +141,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
int rc = 0;
/* Corresponding dput() and mntput() are done when the
- * persistent file is fput() when the eCryptfs inode is
- * destroyed. */
+ * lower file is fput() when all eCryptfs files for the inode are
+ * released. */
dget(lower_dentry);
mntget(lower_mnt);
flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 758323a..89b9338 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -96,7 +96,7 @@ void __ecryptfs_printk(const char *fmt, ...)
}
/**
- * ecryptfs_init_persistent_file
+ * ecryptfs_init_lower_file
* @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with
* the lower dentry and the lower mount set
*
@@ -104,44 +104,70 @@ void __ecryptfs_printk(const char *fmt, ...)
* inode. All I/O operations to the lower inode occur through that
* file. When the first eCryptfs dentry that interposes with the first
* lower dentry for that inode is created, this function creates the
- * persistent file struct and associates it with the eCryptfs
- * inode. When the eCryptfs inode is destroyed, the file is closed.
+ * lower file struct and associates it with the eCryptfs
+ * inode. When all eCryptfs files associated with the inode are released, the
+ * file is closed.
*
- * The persistent file will be opened with read/write permissions, if
+ * The lower file will be opened with read/write permissions, if
* possible. Otherwise, it is opened read-only.
*
- * This function does nothing if a lower persistent file is already
+ * This function does nothing if a lower file is already
* associated with the eCryptfs inode.
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_init_lower_file(struct dentry *dentry,
+ struct file **lower_file)
{
const struct cred *cred = current_cred();
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ int rc;
+
+ rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt,
+ cred);
+ if (rc) {
+ printk(KERN_ERR "Error opening lower file "
+ "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
+ "rc = [%d]\n", lower_dentry, lower_mnt, rc);
+ (*lower_file) = NULL;
+ }
+ return rc;
+}
+
+int ecryptfs_get_lower_file(struct dentry *dentry)
+{
struct ecryptfs_inode_info *inode_info =
- ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
- int rc = 0;
+ ecryptfs_inode_to_private(dentry->d_inode);
+ int count, rc = 0;
mutex_lock(&inode_info->lower_file_mutex);
- if (!inode_info->lower_file) {
- struct dentry *lower_dentry;
- struct vfsmount *lower_mnt =
- ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
-
- lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
- rc = ecryptfs_privileged_open(&inode_info->lower_file,
- lower_dentry, lower_mnt, cred);
- if (rc) {
- printk(KERN_ERR "Error opening lower persistent file "
- "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
- "rc = [%d]\n", lower_dentry, lower_mnt, rc);
- inode_info->lower_file = NULL;
- }
+ count = atomic_inc_return(&inode_info->lower_file_count);
+ if (WARN_ON_ONCE(count < 1))
+ rc = -EINVAL;
+ else if (count == 1) {
+ rc = ecryptfs_init_lower_file(dentry,
+ &inode_info->lower_file);
+ if (rc)
+ atomic_set(&inode_info->lower_file_count, 0);
}
mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
+void ecryptfs_put_lower_file(struct inode *inode)
+{
+ struct ecryptfs_inode_info *inode_info;
+
+ inode_info = ecryptfs_inode_to_private(inode);
+ if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
+ &inode_info->lower_file_mutex)) {
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+ mutex_unlock(&inode_info->lower_file_mutex);
+ }
+}
+
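
Together these two functions replace the old open-once, close-at-destroy persistent file with a reference-counted one: the first get opens the lower file, the last put fput()s it. The same first-get/last-put idiom in isolation, as a sketch with made-up names rather than the eCryptfs symbols (create_obj()/destroy_obj() stand in for the privileged open and the fput()):

	struct lazy_res {
		struct mutex lock;	/* serializes first open / last close */
		atomic_t count;		/* active users */
		void *obj;		/* lazily created object */
	};

	static int lazy_get(struct lazy_res *r)
	{
		int rc = 0;

		mutex_lock(&r->lock);			/* serialize the first open */
		if (atomic_inc_return(&r->count) == 1) {
			rc = create_obj(&r->obj);	/* assumed constructor */
			if (rc)
				atomic_set(&r->count, 0);
		}
		mutex_unlock(&r->lock);
		return rc;
	}

	static void lazy_put(struct lazy_res *r)
	{
		/* takes the mutex only if this put drops the count to zero */
		if (atomic_dec_and_mutex_lock(&r->count, &r->lock)) {
			destroy_obj(r->obj);		/* assumed destructor */
			r->obj = NULL;
			mutex_unlock(&r->lock);
		}
	}
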
static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
@@ -241,14 +267,14 @@ static int ecryptfs_init_global_auth_toks(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct ecryptfs_global_auth_tok *global_auth_tok;
+ struct ecryptfs_auth_tok *auth_tok;
int rc = 0;
list_for_each_entry(global_auth_tok,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
rc = ecryptfs_keyring_auth_tok_for_sig(
- &global_auth_tok->global_auth_tok_key,
- &global_auth_tok->global_auth_tok,
+ &global_auth_tok->global_auth_tok_key, &auth_tok,
global_auth_tok->sig);
if (rc) {
printk(KERN_ERR "Could not find valid key in user "
@@ -256,8 +282,10 @@ static int ecryptfs_init_global_auth_toks(
"option: [%s]\n", global_auth_tok->sig);
global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
goto out;
- } else
+ } else {
global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
+ up_write(&(global_auth_tok->global_auth_tok_key)->sem);
+ }
}
out:
return rc;
@@ -276,7 +304,7 @@ static void ecryptfs_init_mount_crypt_stat(
/**
* ecryptfs_parse_options
* @sb: The ecryptfs super block
- * @options: The options pased to the kernel
+ * @options: The options passed to the kernel
*
* Parse mount options:
* debug=N - ecryptfs_verbosity level for debug output
@@ -840,7 +868,7 @@ static int __init ecryptfs_init(void)
}
rc = ecryptfs_init_messaging();
if (rc) {
- printk(KERN_ERR "Failure occured while attempting to "
+ printk(KERN_ERR "Failure occurred while attempting to "
"initialize the communications channel to "
"ecryptfsd\n");
goto out_destroy_kthread;
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cc64fca..6a44148 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -62,6 +62,18 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc;
+ /*
+ * Refuse to write the page out if we are called from reclaim context
+ * since our writepage() path may potentially allocate memory when
+ * calling into the lower fs vfs_write() which may in turn invoke
+ * us again.
+ */
+ if (current->flags & PF_MEMALLOC) {
+ redirty_page_for_writepage(wbc, page);
+ rc = 0;
+ goto out;
+ }
+
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting "
@@ -70,8 +82,8 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
goto out;
}
SetPageUptodate(page);
- unlock_page(page);
out:
+ unlock_page(page);
return rc;
}
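
The PF_MEMALLOC test is the usual guard for a stacking filesystem whose ->writepage() can itself allocate memory: when invoked from direct reclaim, redirty the page and report success so writeback retries later instead of recursing. Condensed, the pattern the two hunks above add is:

	if (current->flags & PF_MEMALLOC) {	/* called from reclaim? */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);		/* the moved out: label unlocks in the patch */
		return 0;			/* "done" as far as the VM is concerned */
	}
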
@@ -193,11 +205,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;
- if (!crypt_stat
- || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
- || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
- ecryptfs_printk(KERN_DEBUG,
- "Passing through unencrypted page\n");
+ if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
PAGE_CACHE_SIZE,
page->mapping->host);
@@ -295,8 +303,7 @@ static int ecryptfs_write_begin(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
- if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
- || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
+ if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_CACHE_SIZE, mapping->host);
if (rc) {
@@ -374,6 +381,11 @@ static int ecryptfs_write_begin(struct file *file,
&& (pos != 0))
zero_user(page, 0, PAGE_CACHE_SIZE);
out:
+ if (unlikely(rc)) {
+ unlock_page(page);
+ page_cache_release(page);
+ *pagep = NULL;
+ }
return rc;
}
@@ -486,13 +498,8 @@ static int ecryptfs_write_end(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;
+ int need_unlock_page = 1;
- if (crypt_stat->flags & ECRYPTFS_NEW_FILE) {
- ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in "
- "crypt_stat at memory location [%p]\n", crypt_stat);
- crypt_stat->flags &= ~(ECRYPTFS_NEW_FILE);
- } else
- ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
@@ -512,26 +519,26 @@ static int ecryptfs_write_end(struct file *file,
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- rc = ecryptfs_encrypt_page(page);
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
- "index [0x%.16lx])\n", index);
- goto out;
- }
+ set_page_dirty(page);
+ unlock_page(page);
+ need_unlock_page = 0;
if (pos + copied > i_size_read(ecryptfs_inode)) {
i_size_write(ecryptfs_inode, pos + copied);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
"[0x%.16llx]\n",
(unsigned long long)i_size_read(ecryptfs_inode));
+ balance_dirty_pages_ratelimited(mapping);
+ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+ if (rc) {
+ printk(KERN_ERR "Error writing inode size to metadata; "
+ "rc = [%d]\n", rc);
+ goto out;
+ }
}
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- if (rc)
- printk(KERN_ERR "Error writing inode size to metadata; "
- "rc = [%d]\n", rc);
- else
- rc = copied;
+ rc = copied;
out:
- unlock_page(page);
+ if (need_unlock_page)
+ unlock_page(page);
page_cache_release(page);
return rc;
}
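
After this hunk, ->write_end() no longer encrypts synchronously; it just dirties the page (encryption now happens in ->writepage()) and throttles heavy writers. Stripped of error handling, the new tail of the function amounts to this sketch:

	set_page_dirty(page);			/* defer encryption to ->writepage() */
	unlock_page(page);
	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		balance_dirty_pages_ratelimited(mapping);	/* throttle dirtiers */
		/* then persist the new size in the eCryptfs metadata */
	}
	return copied;
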
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index db184ef..85d4309 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -44,15 +44,11 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
ssize_t rc;
inode_info = ecryptfs_inode_to_private(ecryptfs_inode);
- mutex_lock(&inode_info->lower_file_mutex);
BUG_ON(!inode_info->lower_file);
- inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- rc = vfs_write(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_write(inode_info->lower_file, data, size, &offset);
set_fs(fs_save);
- mutex_unlock(&inode_info->lower_file_mutex);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
@@ -234,15 +230,11 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
mm_segment_t fs_save;
ssize_t rc;
- mutex_lock(&inode_info->lower_file_mutex);
BUG_ON(!inode_info->lower_file);
- inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- rc = vfs_read(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_read(inode_info->lower_file, data, size, &offset);
set_fs(fs_save);
- mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
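
Both helpers now do a pread/pwrite-style call: the offset lives on the stack and vfs_read()/vfs_write() advance that local copy instead of seeking the shared lower_file->f_pos, which is exactly why the lower_file_mutex can be dropped here. The read side as a standalone sketch (kernel-internal buffer, hence the set_fs() dance of this kernel era):

	static ssize_t kernel_pread(struct file *filp, char *buf, size_t len,
				    loff_t off)
	{
		mm_segment_t fs_save = get_fs();
		ssize_t rc;

		set_fs(get_ds());			/* buf is a kernel pointer */
		rc = vfs_read(filp, buf, len, &off);	/* advances local off only */
		set_fs(fs_save);
		return rc;
	}
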
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 3042fe1..245b517 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -56,6 +56,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
goto out;
ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
mutex_init(&inode_info->lower_file_mutex);
+ atomic_set(&inode_info->lower_file_count, 0);
inode_info->lower_file = NULL;
inode = &inode_info->vfs_inode;
out:
@@ -78,8 +79,7 @@ static void ecryptfs_i_callback(struct rcu_head *head)
*
* This is used during the final destruction of the inode. All
* allocation of memory related to the inode, including allocated
- * memory in the crypt_stat struct, will be released here. This
- * function also fput()'s the persistent file for the lower inode.
+ * memory in the crypt_stat struct, will be released here.
* There should be no chance that this deallocation will be missed.
*/
static void ecryptfs_destroy_inode(struct inode *inode)
@@ -87,16 +87,7 @@ static void ecryptfs_destroy_inode(struct inode *inode)
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
- if (inode_info->lower_file) {
- struct dentry *lower_dentry =
- inode_info->lower_file->f_dentry;
-
- BUG_ON(!lower_dentry);
- if (lower_dentry->d_inode) {
- fput(inode_info->lower_file);
- inode_info->lower_file = NULL;
- }
- }
+ BUG_ON(inode_info->lower_file);
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
call_rcu(&inode->i_rcu, ecryptfs_i_callback);
}
@@ -198,7 +189,7 @@ static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
const struct super_operations ecryptfs_sops = {
.alloc_inode = ecryptfs_alloc_inode,
.destroy_inode = ecryptfs_destroy_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = generic_drop_inode,
.statfs = ecryptfs_statfs,
.remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index a8e7797..9c13412 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations efs_aops = {
.readpage = efs_readpage,
- .sync_page = block_sync_page,
.bmap = _efs_bmap
};
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ff12f7a..f9cfd16 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -181,7 +181,7 @@ struct eventpoll {
/*
* This is a single linked list that chains all the "struct epitem" that
- * happened while transfering ready events to userspace w/out
+ * happened while transferring ready events to userspace w/out
* holding ->lock.
*/
struct epitem *ovflist;
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
}
/**
+ * ep_events_available - Checks if ready events might be available.
+ *
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Returns: Returns a value different than zero if ready events are available,
+ * or zero otherwise.
+ */
+static inline int ep_events_available(struct eventpoll *ep)
+{
+ return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+}
+
+/**
* ep_call_nested - Perform a bound (possibly) nested call, by checking
* that the recursion limit is not exceeded, and that
* the same nested call (by the meaning of same cookie) is
@@ -593,7 +606,7 @@ static void ep_free(struct eventpoll *ep)
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
- * holding "epmutex" is sufficent here.
+ * holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
@@ -707,7 +720,7 @@ void eventpoll_release_file(struct file *file)
/*
* We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
- * cleanup path, and this means that noone is using this file anymore.
+ * cleanup path, and this means that no one is using this file anymore.
* So, for example, epoll_ctl() cannot hit here since if we reach this
* point, the file counter already went to zero and fget() would fail.
* The only hit might come from ep_free() but by holding the mutex
@@ -1099,7 +1112,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
- * availability. At this point, noone can insert
+ * availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
return timespec_add_safe(now, ts);
}
+/**
+ * ep_poll - Retrieves ready events, and delivers them to the caller supplied
+ * event buffer.
+ *
+ * @ep: Pointer to the eventpoll context.
+ * @events: Pointer to the userspace buffer where the ready events should be
+ * stored.
+ * @maxevents: Size (in terms of number of events) of the caller event buffer.
+ * @timeout: Maximum timeout for the ready events fetch operation, in
+ * milliseconds. If the @timeout is zero, the function will not block,
+ * while if the @timeout is less than zero, the function will block
+ * until at least one event has been retrieved (or an error
+ * occurred).
+ *
+ * Returns: Returns the number of ready events which have been fetched, or an
+ * error code, in case of error.
+ */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
- int res, eavail, timed_out = 0;
+ int res = 0, eavail, timed_out = 0;
unsigned long flags;
- long slack;
+ long slack = 0;
wait_queue_t wait;
ktime_t expires, *to = NULL;
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
to = &expires;
*to = timespec_to_ktime(end_time);
} else if (timeout == 0) {
+ /*
+ * Avoid the unnecessary trip to the wait queue loop, if the
+ * caller specified a non blocking operation.
+ */
timed_out = 1;
+ spin_lock_irqsave(&ep->lock, flags);
+ goto check_events;
}
-retry:
+fetch_events:
spin_lock_irqsave(&ep->lock, flags);
- res = 0;
- if (list_empty(&ep->rdllist)) {
+ if (!ep_events_available(ep)) {
/*
* We don't have any available event to return to the caller.
* We need to sleep here, and we will be wake up by
@@ -1174,7 +1209,7 @@ retry:
* to TASK_INTERRUPTIBLE before doing the checks.
*/
set_current_state(TASK_INTERRUPTIBLE);
- if (!list_empty(&ep->rdllist) || timed_out)
+ if (ep_events_available(ep) || timed_out)
break;
if (signal_pending(current)) {
res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
set_current_state(TASK_RUNNING);
}
+check_events:
/* Is it worth to try to dig for events ? */
- eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+ eavail = ep_events_available(ep);
spin_unlock_irqrestore(&ep->lock, flags);
@@ -1203,7 +1239,7 @@ retry:
*/
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
- goto retry;
+ goto fetch_events;
return res;
}
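
The timeout contract documented above is the one visible from userspace through epoll_wait(2). For illustration (epfd is assumed to be an existing epoll instance with registered descriptors):

	#include <sys/epoll.h>

	struct epoll_event ev[16];

	int n0 = epoll_wait(epfd, ev, 16, 0);    /* 0: poll once, never block     */
	int nb = epoll_wait(epfd, ev, 16, -1);   /* <0: block until events/signal */
	int nt = epoll_wait(epfd, ev, 16, 500);  /* >0: wait at most 500 ms       */
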
diff --git a/fs/exec.c b/fs/exec.c
index ba99e1a..5e62d26 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1875,7 +1875,7 @@ static void wait_for_dump_helpers(struct file *file)
/*
- * uhm_pipe_setup
+ * umh_pipe_setup
* helper function to customize the process used
* to collect the core in userspace. Specifically
* it sets up a pipe and installs it as fd 0 (stdin)
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index f0d5203..3bbd469 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -53,10 +53,14 @@
#define EXOFS_ROOT_ID 0x10002 /* object ID for root directory */
/* exofs Application specific page/attribute */
+/* Inode attrs */
# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3)
# define EXOFS_ATTR_INODE_DATA 1
# define EXOFS_ATTR_INODE_FILE_LAYOUT 2
# define EXOFS_ATTR_INODE_DIR_LAYOUT 3
+/* Partition attrs */
+# define EXOFS_APAGE_SB_DATA (0xF0000000U + 3)
+# define EXOFS_ATTR_SB_STATS 1
/*
* The maximum number of files we can have is limited by the size of the
@@ -86,8 +90,8 @@ enum {
*/
enum {EXOFS_FSCB_VER = 1, EXOFS_DT_VER = 1};
struct exofs_fscb {
- __le64 s_nextid; /* Highest object ID used */
- __le64 s_numfiles; /* Number of files on fs */
+ __le64 s_nextid; /* Only used after mkfs */
+ __le64 s_numfiles; /* Only used after mkfs */
__le32 s_version; /* == EXOFS_FSCB_VER */
__le16 s_magic; /* Magic signature */
__le16 s_newfs; /* Non-zero if this is a new fs */
@@ -98,10 +102,20 @@ struct exofs_fscb {
} __packed;
/*
+ * This struct is set on the FS partition's attributes.
+ * [EXOFS_APAGE_SB_DATA, EXOFS_ATTR_SB_STATS] and is written together
+ * with the create command, to atomically persist the sb writeable information.
+ */
+struct exofs_sb_stats {
+ __le64 s_nextid; /* Highest object ID used */
+ __le64 s_numfiles; /* Number of files on fs */
+} __packed;
+
+/*
* Describes the raid used in the FS. It is part of the device table.
* This here is taken from the pNFS-objects definition. In exofs we
* use one raid policy through-out the filesystem. (NOTE: the funny
- * alignment at begining. We take care of it at exofs_device_table.
+ * alignment at beginning. We take care of it at exofs_device_table.
*/
struct exofs_dt_data_map {
__le32 cb_num_comps;
@@ -122,7 +136,7 @@ struct exofs_dt_device_info {
u8 systemid[OSD_SYSTEMID_LEN];
__le64 long_name_offset; /* If !0 then offset-in-file */
__le32 osdname_len; /* */
- u8 osdname[44]; /* Embbeded, Ususally an asci uuid */
+ u8 osdname[44]; /* Embedded, usually an ASCII uuid */
} __packed;
/*
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index dcc941d..d0941c6 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -124,7 +124,7 @@ out:
Ebadsize:
EXOFS_ERR("ERROR [exofs_check_page]: "
- "size of directory #%lu is not a multiple of chunk size",
+ "size of directory(0x%lx) is not a multiple of chunk size\n",
dir->i_ino
);
goto fail;
@@ -142,8 +142,8 @@ Espan:
goto bad_entry;
bad_entry:
EXOFS_ERR(
- "ERROR [exofs_check_page]: bad entry in directory #%lu: %s - "
- "offset=%lu, inode=%llu, rec_len=%d, name_len=%d",
+ "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
+ "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
@@ -151,8 +151,8 @@ bad_entry:
Eend:
p = (struct exofs_dir_entry *)(kaddr + offs);
EXOFS_ERR("ERROR [exofs_check_page]: "
- "entry in directory #%lu spans the page boundary"
- "offset=%lu, inode=%llu",
+ "entry in directory(0x%lx) spans the page boundary"
+ "offset=%lu, inode=0x%llx\n",
dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)));
fail:
@@ -261,9 +261,8 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct page *page = exofs_get_page(inode, n);
if (IS_ERR(page)) {
- EXOFS_ERR("ERROR: "
- "bad page in #%lu",
- inode->i_ino);
+ EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
+ inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
return PTR_ERR(page);
}
@@ -283,7 +282,8 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
for (; (char *)de <= limit; de = exofs_next_entry(de)) {
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: "
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
exofs_put_page(page);
return -EIO;
}
@@ -342,9 +342,9 @@ struct exofs_dir_entry *exofs_find_entry(struct inode *dir,
kaddr += exofs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- EXOFS_ERR(
- "ERROR: exofs_find_entry: "
- "zero-length directory entry");
+ EXOFS_ERR("ERROR: zero-length entry in "
+ "directory(0x%lx)\n",
+ dir->i_ino);
exofs_put_page(page);
goto out;
}
@@ -472,7 +472,8 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
}
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: exofs_add_link: "
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
err = -EIO;
goto out_unlock;
}
@@ -491,7 +492,8 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
exofs_put_page(page);
}
- EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=%p", dentry, inode);
+ EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=0x%lx\n",
+ dentry, inode->i_ino);
return -EINVAL;
got_it:
@@ -542,7 +544,8 @@ int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page)
while (de < dir) {
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: exofs_delete_entry:"
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
err = -EIO;
goto out;
}
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 2dc925f..c965806 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -77,7 +77,7 @@ struct exofs_layout {
* our extension to the in-memory superblock
*/
struct exofs_sb_info {
- struct exofs_fscb s_fscb; /* Written often, pre-allocate*/
+ struct exofs_sb_stats s_ess; /* Written often, pre-allocate*/
int s_timeout; /* timeout for OSD operations */
uint64_t s_nextid; /* highest object ID used */
uint32_t s_numfiles; /* number of files on fs */
@@ -256,6 +256,8 @@ static inline int exofs_oi_read(struct exofs_i_info *oi,
}
/* inode.c */
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -279,7 +281,7 @@ int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
/* super.c */
-int exofs_sync_fs(struct super_block *sb, int wait);
+int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
/*********************
* operation vectors *
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index b905c79..45ca323 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -45,22 +45,8 @@ static int exofs_release_file(struct inode *inode, struct file *filp)
static int exofs_file_fsync(struct file *filp, int datasync)
{
int ret;
- struct inode *inode = filp->f_mapping->host;
- struct super_block *sb;
-
- if (!(inode->i_state & I_DIRTY))
- return 0;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- return 0;
-
- ret = sync_inode_metadata(inode, 1);
-
- /* This is a good place to write the sb */
- /* TODO: Sechedule an sb-sync on create */
- sb = inode->i_sb;
- if (sb->s_dirt)
- exofs_sync_fs(sb, 1);
+ ret = sync_inode_metadata(filp->f_mapping->host, 1);
return ret;
}
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index a755523..8472c09 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -43,6 +43,17 @@ enum { BIO_MAX_PAGES_KMALLOC =
PAGE_SIZE / sizeof(struct page *),
};
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages)
+{
+ unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+
+ /* TODO: easily support bio chaining */
+ pages = min_t(unsigned, pages,
+ layout->group_width * BIO_MAX_PAGES_KMALLOC);
+ return pages;
+}
+
struct page_collect {
struct exofs_sb_info *sbi;
struct inode *inode;
@@ -97,8 +108,7 @@ static void _pcol_reset(struct page_collect *pcol)
static int pcol_try_alloc(struct page_collect *pcol)
{
- unsigned pages = min_t(unsigned, pcol->expected_pages,
- MAX_PAGES_KMALLOC);
+ unsigned pages;
if (!pcol->ios) { /* First time allocate io_state */
int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
@@ -108,8 +118,7 @@ static int pcol_try_alloc(struct page_collect *pcol)
}
/* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages,
- pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC);
+ pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
for (; pages; pages >>= 1) {
pcol->pages = kmalloc(pages * sizeof(struct page *),
@@ -350,8 +359,10 @@ static int readpage_strip(void *data, struct page *page)
if (!pcol->read_4_write)
unlock_page(page);
- EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
- " splitting\n", inode->i_ino, page->index);
+ EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
+ "read_4_write=%d index=0x%lx end_index=0x%lx "
+ "splitting\n", inode->i_ino, len,
+ pcol->read_4_write, page->index, end_index);
return read_exec(pcol);
}
@@ -722,11 +733,28 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
/* read modify write */
if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+ loff_t i_size = i_size_read(mapping->host);
+ pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ size_t rlen;
+
+ if (page->index < end_index)
+ rlen = PAGE_CACHE_SIZE;
+ else if (page->index == end_index)
+ rlen = i_size & ~PAGE_CACHE_MASK;
+ else
+ rlen = 0;
+
+ if (!rlen) {
+ clear_highpage(page);
+ SetPageUptodate(page);
+ goto out;
+ }
+
ret = _readpage(page, true);
if (ret) {
/*SetPageError was done by _readpage. Is it ok?*/
unlock_page(page);
- EXOFS_DBGMSG("__readpage_filler failed\n");
+ EXOFS_DBGMSG("__readpage failed\n");
}
}
out:
@@ -795,7 +823,6 @@ const struct address_space_operations exofs_aops = {
.direct_IO = NULL, /* TODO: Should be trivial to do */
/* With these NULL has special meaning or default is not exported */
- .sync_page = NULL,
.get_xip_mem = NULL,
.migratepage = NULL,
.launder_page = NULL,
@@ -1030,6 +1057,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1073,6 +1101,7 @@ int __exofs_wait_obj_created(struct exofs_i_info *oi)
}
return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
+
/*
* Callback function from exofs_new_inode(). The important thing is that we
* set the obj_created flag so that other methods know that the object exists on
@@ -1130,7 +1159,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;
- sb->s_dirt = 1;
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
inode->i_blkbits = EXOFS_BLKSHIFT;
@@ -1141,6 +1170,8 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
spin_unlock(&sbi->s_next_gen_lock);
insert_inode_hash(inode);
+ exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
+
mark_inode_dirty(inode);
ret = exofs_get_io_state(&sbi->layout, &ios);
@@ -1271,7 +1302,8 @@ out:
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+ /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
+ return exofs_update_inode(inode, 1);
}
/*
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8c6c466..06065bd 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -48,6 +48,7 @@
* struct to hold what we get from mount options
*/
struct exofs_mountopt {
+ bool is_osdname;
const char *dev_name;
uint64_t pid;
int timeout;
@@ -56,7 +57,7 @@ struct exofs_mountopt {
/*
* exofs-specific mount-time options.
*/
-enum { Opt_pid, Opt_to, Opt_mkfs, Opt_format, Opt_err };
+enum { Opt_name, Opt_pid, Opt_to, Opt_err };
/*
* Our mount-time options. These should ideally be 64-bit unsigned, but the
@@ -64,6 +65,7 @@ enum { Opt_pid, Opt_to, Opt_mkfs, Opt_format, Opt_err };
* sufficient for most applications now.
*/
static match_table_t tokens = {
+ {Opt_name, "osdname=%s"},
{Opt_pid, "pid=%u"},
{Opt_to, "to=%u"},
{Opt_err, NULL}
@@ -94,6 +96,14 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
token = match_token(p, tokens, args);
switch (token) {
+ case Opt_name:
+ opts->dev_name = match_strdup(&args[0]);
+ if (unlikely(!opts->dev_name)) {
+ EXOFS_ERR("Error allocating dev_name");
+ return -ENOMEM;
+ }
+ opts->is_osdname = true;
+ break;
case Opt_pid:
if (0 == match_strlcpy(str, &args[0], sizeof(str)))
return -EINVAL;
@@ -203,6 +213,101 @@ static void destroy_inodecache(void)
static const struct super_operations exofs_sops;
static const struct export_operations exofs_export_ops;
+static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
+ EXOFS_APAGE_SB_DATA,
+ EXOFS_ATTR_SB_STATS,
+ sizeof(struct exofs_sb_stats));
+
+static int __sbi_read_stats(struct exofs_sb_info *sbi)
+{
+ struct osd_attr attrs[] = {
+ [0] = g_attr_sb_stats,
+ };
+ struct exofs_io_state *ios;
+ int ret;
+
+ ret = exofs_get_io_state(&sbi->layout, &ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ return ret;
+ }
+
+ ios->cred = sbi->s_cred;
+
+ ios->in_attr = attrs;
+ ios->in_attr_len = ARRAY_SIZE(attrs);
+
+ ret = exofs_sbi_read(ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("Error reading super_block stats => %d\n", ret);
+ goto out;
+ }
+
+ ret = extract_attr_from_ios(ios, &attrs[0]);
+ if (ret) {
+ EXOFS_ERR("%s: extract_attr of sb_stats failed\n", __func__);
+ goto out;
+ }
+ if (attrs[0].len) {
+ struct exofs_sb_stats *ess;
+
+ if (unlikely(attrs[0].len != sizeof(*ess))) {
+ EXOFS_ERR("%s: Wrong version of exofs_sb_stats "
+ "size(%d) != expected(%zd)\n",
+ __func__, attrs[0].len, sizeof(*ess));
+ goto out;
+ }
+
+ ess = attrs[0].val_ptr;
+ sbi->s_nextid = le64_to_cpu(ess->s_nextid);
+ sbi->s_numfiles = le32_to_cpu(ess->s_numfiles);
+ }
+
+out:
+ exofs_put_io_state(ios);
+ return ret;
+}
+
+static void stats_done(struct exofs_io_state *ios, void *p)
+{
+ exofs_put_io_state(ios);
+ /* Nothing else to do; the io_state has been released */
+}
+
+/* Asynchronously write the stats attribute */
+int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
+{
+ struct osd_attr attrs[] = {
+ [0] = g_attr_sb_stats,
+ };
+ struct exofs_io_state *ios;
+ int ret;
+
+ ret = exofs_get_io_state(&sbi->layout, &ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ return ret;
+ }
+
+ sbi->s_ess.s_nextid = cpu_to_le64(sbi->s_nextid);
+ sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
+ attrs[0].val_ptr = &sbi->s_ess;
+
+ ios->cred = sbi->s_cred;
+ ios->done = stats_done;
+ ios->private = sbi;
+ ios->out_attr = attrs;
+ ios->out_attr_len = ARRAY_SIZE(attrs);
+
+ ret = exofs_sbi_write(ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
+ exofs_put_io_state(ios);
+ }
+
+ return ret;
+}
+
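
The write side is fire-and-forget: setting ios->done appears to make exofs_sbi_write() return right after submission, with stats_done() running at completion to release the io_state (a hedged reading of the exofs I/O engine of this period). The shape of that completion-callback pattern, with illustrative names:

	static void my_done(struct exofs_io_state *ios, void *private)
	{
		exofs_put_io_state(ios);	/* completion path owns the io_state */
	}

		...
		ios->done = my_done;		/* presence of ->done => async submit */
		ios->private = sbi;
		ret = exofs_sbi_write(ios);	/* returns once queued */
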
/*
* Write the superblock to the OSD
*/
@@ -213,18 +318,25 @@ int exofs_sync_fs(struct super_block *sb, int wait)
struct exofs_io_state *ios;
int ret = -ENOMEM;
- lock_super(sb);
+ fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
+ if (unlikely(!fscb))
+ return -ENOMEM;
+
sbi = sb->s_fs_info;
- fscb = &sbi->s_fscb;
+ /* NOTE: We no longer dirty the super_block anywhere in exofs. The
+ * reason we write the fscb here on unmount is so we can stay backwards
+ * compatible with fscb->s_version == 1. (What we are not compatible
+ * with is if a new version FS crashed and then we try to mount an old
+ * version). Otherwise the exofs_fscb is read-only from mkfs time. All
+ * the writeable info is set in exofs_sbi_write_stats() above.
+ */
ret = exofs_get_io_state(&sbi->layout, &ios);
- if (ret)
+ if (unlikely(ret))
goto out;
- /* Note: We only write the changing part of the fscb. .i.e upto the
- * the fscb->s_dev_table_oid member. There is no read-modify-write
- * here.
- */
+ lock_super(sb);
+
ios->length = offsetof(struct exofs_fscb, s_dev_table_oid);
memset(fscb, 0, ios->length);
fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
@@ -239,16 +351,17 @@ int exofs_sync_fs(struct super_block *sb, int wait)
ios->cred = sbi->s_cred;
ret = exofs_sbi_write(ios);
- if (unlikely(ret)) {
+ if (unlikely(ret))
EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
- goto out;
- }
- sb->s_dirt = 0;
+ else
+ sb->s_dirt = 0;
+
+ unlock_super(sb);
out:
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
exofs_put_io_state(ios);
- unlock_super(sb);
+ kfree(fscb);
return ret;
}
@@ -292,13 +405,14 @@ static void exofs_put_super(struct super_block *sb)
int num_pend;
struct exofs_sb_info *sbi = sb->s_fs_info;
- if (sb->s_dirt)
- exofs_write_super(sb);
-
/* make sure there are no pending commands */
for (num_pend = atomic_read(&sbi->s_curr_pending); num_pend > 0;
num_pend = atomic_read(&sbi->s_curr_pending)) {
wait_queue_head_t wq;
+
+ printk(KERN_NOTICE "%s: !!Pending operations in flight. "
+ "This is a BUG. please report to osd-dev@open-osd.org\n",
+ __func__);
init_waitqueue_head(&wq);
wait_event_timeout(wq,
(atomic_read(&sbi->s_curr_pending) == 0),
@@ -390,6 +504,23 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
return 0;
}
+static unsigned __ra_pages(struct exofs_layout *layout)
+{
+ const unsigned _MIN_RA = 32; /* min 128K read-ahead */
+ unsigned ra_pages = layout->group_width * layout->stripe_unit /
+ PAGE_SIZE;
+ unsigned max_io_pages = exofs_max_io_pages(layout, ~0);
+
+ ra_pages *= 2; /* two stripes */
+ if (ra_pages < _MIN_RA)
+ ra_pages = roundup(_MIN_RA, ra_pages / 2);
+
+ if (ra_pages > max_io_pages)
+ ra_pages = max_io_pages;
+
+ return ra_pages;
+}
+
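
Worked example, assuming 4 KiB pages: group_width = 4 and stripe_unit = 64 KiB give 4 * 16 = 64 pages per full stripe, doubled to 128 pages (512 KiB) of read-ahead. A narrow layout whose doubled value falls below _MIN_RA (32 pages, the 128 KiB minimum) instead gets _MIN_RA rounded up to a whole number of stripes, and in every case the result is clamped to exofs_max_io_pages().
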
/* @odi is valid only as long as @fscb_dev is valid */
static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
struct osd_dev_info *odi)
@@ -495,7 +626,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
}
od = osduld_info_lookup(&odi);
- if (unlikely(IS_ERR(od))) {
+ if (IS_ERR(od)) {
ret = PTR_ERR(od);
EXOFS_ERR("ERROR: device requested is not found "
"osd_name-%s =>%d\n", odi.osdname, ret);
@@ -558,9 +689,17 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_bdi;
/* use mount options to fill superblock */
- od = osduld_path_lookup(opts->dev_name);
+ if (opts->is_osdname) {
+ struct osd_dev_info odi = {.systemid_len = 0};
+
+ odi.osdname_len = strlen(opts->dev_name);
+ odi.osdname = (u8 *)opts->dev_name;
+ od = osduld_info_lookup(&odi);
+ } else {
+ od = osduld_path_lookup(opts->dev_name);
+ }
if (IS_ERR(od)) {
- ret = PTR_ERR(od);
+ ret = -EINVAL;
goto free_sbi;
}
@@ -594,6 +733,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
sb->s_magic = le16_to_cpu(fscb.s_magic);
+ /* NOTE: we read below to be backward compatible with old versions */
sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
sbi->s_numfiles = le32_to_cpu(fscb.s_numfiles);
@@ -604,7 +744,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
ret = -EINVAL;
goto free_sbi;
}
- if (le32_to_cpu(fscb.s_version) != EXOFS_FSCB_VER) {
+ if (le32_to_cpu(fscb.s_version) > EXOFS_FSCB_VER) {
EXOFS_ERR("ERROR: Bad FSCB version expected-%d got-%d\n",
EXOFS_FSCB_VER, le32_to_cpu(fscb.s_version));
ret = -EINVAL;
@@ -622,7 +762,10 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
+ __sbi_read_stats(sbi);
+
/* set up operation vectors */
+ sbi->bdi.ra_pages = __ra_pages(&sbi->layout);
sb->s_bdi = &sbi->bdi;
sb->s_fs_info = sbi;
sb->s_op = &exofs_sops;
@@ -652,6 +795,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
_exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
sbi->layout.s_pid);
+ if (opts->is_osdname)
+ kfree(opts->dev_name);
return 0;
free_sbi:
@@ -660,6 +805,8 @@ free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
opts->dev_name, sbi->layout.s_pid, ret);
exofs_free_sbi(sbi);
+ if (opts->is_osdname)
+ kfree(opts->dev_name);
return ret;
}
@@ -677,7 +824,8 @@ static struct dentry *exofs_mount(struct file_system_type *type,
if (ret)
return ERR_PTR(ret);
- opts.dev_name = dev_name;
+ if (!opts.dev_name)
+ opts.dev_name = dev_name;
return mount_nodev(type, flags, &opts, exofs_fill_super);
}
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 7b41805..abea5a1 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -406,7 +406,7 @@ ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (value) {
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 0d06f4e..8f44cef 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -850,7 +850,7 @@ static int find_next_reservable_window(
rsv_window_remove(sb, my_rsv);
/*
- * Let's book the whole avaliable window for now. We will check the
+ * Let's book the whole available window for now. We will check the
* disk bitmap later and then, if there are free blocks then we adjust
* the window size if it's larger than requested.
* Otherwise, we will remove this node from the tree next time
@@ -1357,9 +1357,9 @@ retry_alloc:
goto allocated;
}
/*
- * We may end up a bogus ealier ENOSPC error due to
+ * We may end up a bogus earlier ENOSPC error due to
* filesystem is "full" of reservations, but
- * there maybe indeed free blocks avaliable on disk
+ * there may indeed be free blocks available on disk
* In this case, we just forget about the reservations
* just do block allocation as without reservations.
*/
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 1b48c33..645be9e 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -174,3 +174,9 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
return group_no * (ext2_fsblk_t)EXT2_BLOCKS_PER_GROUP(sb) +
le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
}
+
+#define ext2_set_bit __test_and_set_bit_le
+#define ext2_clear_bit __test_and_clear_bit_le
+#define ext2_test_bit test_bit_le
+#define ext2_find_first_zero_bit find_first_zero_bit_le
+#define ext2_find_next_zero_bit find_next_zero_bit_le
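
These wrappers map the traditional ext2 bitmap helpers directly onto the generic little-endian bitops, so callers keep their existing idioms. Illustrative use on a block bitmap (bh, bit, bits_per_group and start are assumed to be set up by the caller):

	void *bitmap = bh->b_data;

	if (!ext2_set_bit(bit, bitmap)) {
		/* bit was clear: this block is now ours */
	}
	next = ext2_find_next_zero_bit(bitmap, bits_per_group, start);
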
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 40ad210..788e09a 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -305,7 +305,7 @@ static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
return ind->bh->b_blocknr;
/*
- * It is going to be refered from inode itself? OK, just put it into
+ * It is going to be referred from inode itself? OK, just put it into
* the same cylinder group then.
*/
bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_nobh_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_nobh_write_begin,
.write_end = nobh_write_end,
.bmap = ext2_bmap,
@@ -915,7 +913,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several indirect
* blocks but leave the blocks themselves alive. Block is partially
- * truncated if some data below the new i_size is refered from it (and
+ * truncated if some data below the new i_size is referred from it (and
* it is on the path to the first completely truncated data block, indeed).
* We have to free the top of that path along with everything to the right
* of the path. Since no allocation past the truncation point is possible
@@ -992,7 +990,7 @@ no_top:
* @p: array of block numbers
* @q: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are
+ * We are freeing all blocks referred from that array (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
@@ -1032,7 +1030,7 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
* @q: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e743130..f81e250 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -39,7 +39,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
return ret;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto setflags_out;
}
@@ -89,7 +89,7 @@ setflags_out:
case EXT2_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *) arg);
case EXT2_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
ret = mnt_want_write(filp->f_path.mnt);
if (ret)
@@ -115,7 +115,7 @@ setflags_out:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7731695..0a78dae 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1382,7 +1382,7 @@ static struct dentry *ext2_mount(struct file_system_type *fs_type,
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index c2e4dce..5299706 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -35,7 +35,7 @@
* +------------------+
*
* The block header is followed by multiple entry descriptors. These entry
- * descriptors are variable in size, and alligned to EXT2_XATTR_PAD
+ * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
* byte boundaries. The entry descriptors are sorted by attribute name,
* so that two extended attribute blocks can be compared efficiently.
*
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e4fa49e..9d021c0 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -435,7 +435,7 @@ ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 15324218..fe52297 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -590,7 +590,7 @@ do_more:
BUFFER_TRACE(debug_bh, "Deleted!");
if (!bh2jh(bitmap_bh)->b_committed_data)
BUFFER_TRACE(debug_bh,
- "No commited data in bitmap");
+ "No committed data in bitmap");
BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
__brelse(debug_bh);
}
@@ -1063,7 +1063,7 @@ static int find_next_reservable_window(
rsv_window_remove(sb, my_rsv);
/*
- * Let's book the whole avaliable window for now. We will check the
+ * Let's book the whole available window for now. We will check the
* disk bitmap later and then, if there are free blocks then we adjust
* the window size if it's larger than requested.
* Otherwise, we will remove this node from the tree next time
@@ -1456,7 +1456,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
*
* ext3_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
- * for the current or commiting transaction to complete, and then
+ * for the current or committing transaction to complete, and then
* return TRUE.
*
* if the total number of retries exceed three times, return FALSE.
@@ -1632,9 +1632,9 @@ retry_alloc:
goto allocated;
}
/*
- * We may end up a bogus ealier ENOSPC error due to
+ * We may end up a bogus earlier ENOSPC error due to
* filesystem is "full" of reservations, but
- * there maybe indeed free blocks avaliable on disk
+ * there may indeed be free blocks available on disk
* In this case, we just forget about the reservations
* just do block allocation as without reservations.
*/
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ae94f6d..68b2e43 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_ordered_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_ordered_write_end,
.bmap = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_writeback_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_writeback_write_end,
.bmap = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_journalled_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_journalled_write_end,
.set_page_dirty = ext3_journalled_set_page_dirty,
@@ -2058,7 +2055,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is refered
+ * partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
@@ -2187,7 +2184,7 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
* @first: array of block numbers
* @last: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are stored as
+ * We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -2275,7 +2272,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
@@ -3294,7 +3291,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
if (ext3_should_journal_data(inode))
ret = 3 * (bpp + indirects) + 2;
else
- ret = 2 * (bpp + indirects) + 2;
+ ret = 2 * (bpp + indirects) + indirects + 2;
#ifdef CONFIG_QUOTA
/* We know that structure was already allocated during dquot_initialize so
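The extra `indirects' term in the ordered/writeback estimate matters in
practice. Worked out for 4 KiB blocks on 4 KiB pages (so bpp == 1) and the
usual worst case of 3 indirect-tree levels (indirects == 3):

    journalled data:    3 * (1 + 3) + 2     = 14 credits
    ordered/writeback:  2 * (1 + 3) + 3 + 2 = 13 credits (was 10 before)

so the old estimate could leave a handle up to `indirects' credits short.
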
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index fc080dd..f4090bd 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -38,7 +38,7 @@ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int oldflags;
unsigned int jflag;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -123,7 +123,7 @@ flags_out:
__u32 generation;
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
err = mnt_want_write(filp->f_path.mnt);
@@ -192,7 +192,7 @@ setversion_out:
if (err)
return err;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setrsvsz_out;
}
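is_owner_or_cap() was renamed tree-wide to inode_owner_or_capable() with
unchanged semantics. Roughly, as a simplified sketch (the real fs/inode.c
helper also accounts for user namespaces):

    /* Simplified: the caller owns the inode, or holds CAP_FOWNER. */
    static bool inode_owner_or_capable_sketch(const struct inode *inode)
    {
        if (current_fsuid() == inode->i_uid)
            return true;
        return capable(CAP_FOWNER);
    }
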
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 108b142..7916e4ce 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -1009,7 +1009,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
- " upto "E3FSBLK" blocks\n",
+ " up to "E3FSBLK" blocks\n",
o_blocks_count, n_blocks_count);
if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 071689f..3c6a9e0 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2925,7 +2925,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index e0270d1..21eacd7 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -433,7 +433,7 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index adf96b8..1c67139 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -21,6 +21,8 @@
#include "ext4_jbd2.h"
#include "mballoc.h"
+#include <trace/events/ext4.h>
+
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
@@ -342,6 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
* We do it here so the bitmap uptodate bit
* get set with buffer lock held.
*/
+ trace_ext4_read_block_bitmap_load(sb, block_group);
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
@@ -544,7 +547,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
*
* ext4_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
- * for the current or commiting transaction to complete, and then
+ * for the current or committing transaction to complete, and then
* return TRUE.
*
* if the total number of retries exceed three times, return FALSE.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3aa0b72..4daaf2b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -923,14 +923,14 @@ struct ext4_inode_info {
#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
EXT4_MOUNT2_##opt)
-#define ext4_set_bit ext2_set_bit
+#define ext4_set_bit __test_and_set_bit_le
#define ext4_set_bit_atomic ext2_set_bit_atomic
-#define ext4_clear_bit ext2_clear_bit
+#define ext4_clear_bit __test_and_clear_bit_le
#define ext4_clear_bit_atomic ext2_clear_bit_atomic
-#define ext4_test_bit ext2_test_bit
-#define ext4_find_first_zero_bit ext2_find_first_zero_bit
-#define ext4_find_next_zero_bit ext2_find_next_zero_bit
-#define ext4_find_next_bit ext2_find_next_bit
+#define ext4_test_bit test_bit_le
+#define ext4_find_first_zero_bit find_first_zero_bit_le
+#define ext4_find_next_zero_bit find_next_zero_bit_le
+#define ext4_find_next_bit find_next_bit_le
/*
* Maximal mount counts between two filesystem checks
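The ext2_*_bit() wrappers are replaced by the generic little-endian bitops;
ext4's on-disk bitmaps are little-endian regardless of host byte order, so the
*_le() helpers keep big-endian machines correct. A usage sketch with the new
names (bitmap buffer assumed to come from a buffer_head):

    void *bitmap = bh->b_data;
    unsigned long nbits = sb->s_blocksize << 3;    /* bits per block */
    unsigned long bit;

    bit = ext4_find_next_zero_bit(bitmap, nbits, 0); /* find_next_zero_bit_le */
    if (bit < nbits)
        ext4_set_bit(bit, bitmap);    /* __test_and_set_bit_le, non-atomic */
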
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index d8b992e..d0f5353 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -86,8 +86,8 @@
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
* but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
@@ -202,13 +202,6 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
return 1;
}
-static inline void ext4_journal_release_buffer(handle_t *handle,
- struct buffer_head *bh)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_release_buffer(handle, bh);
-}
-
static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
{
return ext4_journal_start_sb(inode->i_sb, nblocks);
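The quota credit drops from 2 to 1 because dquot_initialize() has already
allocated the dquot by the time a transaction runs, so only the quota-file
data block holding it is rewritten, not the quota inode as well. Worked out,
assuming user and group quotas are both enabled (two quota types): per-handle
quota overhead falls from 2 * 2 = 4 to 2 * 1 = 2 reserved blocks.
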
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7516fb9..4890d6f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -44,6 +44,8 @@
#include "ext4_jbd2.h"
#include "ext4_extents.h"
+#include <trace/events/ext4.h>
+
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
@@ -664,6 +666,8 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
if (unlikely(!bh))
goto err;
if (!bh_uptodate_or_lock(bh)) {
+ trace_ext4_ext_load_extent(inode, block,
+ path[ppos].p_block);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
goto err;
@@ -1034,7 +1038,7 @@ cleanup:
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
- ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
@@ -1725,7 +1729,7 @@ repeat:
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
- ext_debug("next leaf isnt full(%d)\n",
+ ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto repeat;
@@ -2059,7 +2063,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
- ext4_free_blocks(handle, inode, 0, leaf, 1,
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return err;
}
@@ -2156,7 +2160,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
num = le32_to_cpu(ex->ee_block) + ee_len - from;
start = ext4_ext_pblock(ex) + ee_len - num;
ext_debug("free last %u blocks starting %llu\n", num, start);
- ext4_free_blocks(handle, inode, 0, start, num, flags);
+ ext4_free_blocks(handle, inode, NULL, start, num, flags);
} else if (from == le32_to_cpu(ex->ee_block)
&& to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
@@ -2529,7 +2533,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
/*
* This function is called by ext4_ext_map_blocks() if someone tries to write
* to an uninitialized extent. It may result in splitting the uninitialized
- * extent into multiple extents (upto three - one initialized and two
+ * extent into multiple extents (up to three - one initialized and two
* uninitialized).
* There are three possibilities:
* a> There is no split required: Entire extent should be initialized
@@ -3108,14 +3112,13 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
{
int i, depth;
struct ext4_extent_header *eh;
- struct ext4_extent *ex, *last_ex;
+ struct ext4_extent *last_ex;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
return 0;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
if (unlikely(!eh->eh_entries)) {
EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
@@ -3171,7 +3174,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
path, flags);
/*
* Flag the inode(non aio case) or end_io struct (aio case)
- * that this IO needs to convertion to written when IO is
+ * that this IO needs to conversion to written when IO is
* completed
*/
if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
@@ -3295,9 +3298,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
- struct ext4_extent_header *eh;
struct ext4_extent newex, *ex;
- ext4_fsblk_t newblock;
+ ext4_fsblk_t newblock = 0;
int err = 0, depth, ret;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
@@ -3305,6 +3307,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
+ trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* check in cache */
if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
@@ -3352,7 +3355,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
err = -EIO;
goto out2;
}
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (ex) {
@@ -3458,10 +3460,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext4_ext_mark_uninitialized(&newex);
/*
* io_end structure was created for every IO write to an
- * uninitialized extent. To avoid unecessary conversion,
+ * uninitialized extent. To avoid unnecessary conversion,
* here we flag the IO that really needs the conversion.
* For non asycn direct IO case, flag the inode state
- * that we need to perform convertion when IO is done.
+ * that we need to perform conversion when IO is done.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
@@ -3485,7 +3487,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
- ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
+ ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
ext4_ext_get_actual_len(&newex), 0);
goto out2;
}
@@ -3525,6 +3527,8 @@ out2:
ext4_ext_drop_refs(path);
kfree(path);
}
+ trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
+ newblock, map->m_len, err ? err : allocated);
return err ? err : allocated;
}
@@ -3658,6 +3662,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EOPNOTSUPP;
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
@@ -3673,6 +3678,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
ret = inode_newsize_ok(inode, (len + offset));
if (ret) {
mutex_unlock(&inode->i_mutex);
+ trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
return ret;
}
retry:
@@ -3717,6 +3723,8 @@ retry:
goto retry;
}
mutex_unlock(&inode->i_mutex);
+ trace_ext4_fallocate_exit(inode, offset, max_blocks,
+ ret > 0 ? ret2 : ret);
return ret > 0 ? ret2 : ret;
}
@@ -3775,6 +3783,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
}
return ret > 0 ? ret2 : ret;
}
+
/*
* Callback function called for each extent to gather FIEMAP information.
*/
@@ -3782,38 +3791,162 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
struct ext4_ext_cache *newex, struct ext4_extent *ex,
void *data)
{
- struct fiemap_extent_info *fieinfo = data;
- unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
__u64 logical;
__u64 physical;
__u64 length;
+ loff_t size;
__u32 flags = 0;
- int error;
+ int ret = 0;
+ struct fiemap_extent_info *fieinfo = data;
+ unsigned char blksize_bits;
- logical = (__u64)newex->ec_block << blksize_bits;
+ blksize_bits = inode->i_sb->s_blocksize_bits;
+ logical = (__u64)newex->ec_block << blksize_bits;
if (newex->ec_start == 0) {
- pgoff_t offset;
- struct page *page;
+ /*
+ * No extent in extent-tree contains block @newex->ec_start,
+ * then the block may stay in 1)a hole or 2)delayed-extent.
+ *
+ * Holes or delayed-extents are processed as follows.
+ * 1. lookup dirty pages with specified range in pagecache.
+ * If no page is got, then there is no delayed-extent and
+ * return with EXT_CONTINUE.
+ * 2. find the 1st mapped buffer,
+ * 3. check if the mapped buffer is both in the request range
+ * and a delayed buffer. If not, there is no delayed-extent,
+ * then return.
+ * 4. a delayed-extent is found, the extent will be collected.
+ */
+ ext4_lblk_t end = 0;
+ pgoff_t last_offset;
+ pgoff_t offset;
+ pgoff_t index;
+ struct page **pages = NULL;
struct buffer_head *bh = NULL;
+ struct buffer_head *head = NULL;
+ unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
+
+ pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (pages == NULL)
+ return -ENOMEM;
offset = logical >> PAGE_SHIFT;
- page = find_get_page(inode->i_mapping, offset);
- if (!page || !page_has_buffers(page))
- return EXT_CONTINUE;
+repeat:
+ last_offset = offset;
+ head = NULL;
+ ret = find_get_pages_tag(inode->i_mapping, &offset,
+ PAGECACHE_TAG_DIRTY, nr_pages, pages);
+
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /* First time, try to find a mapped buffer. */
+ if (ret == 0) {
+out:
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ /* just a hole. */
+ kfree(pages);
+ return EXT_CONTINUE;
+ }
- bh = page_buffers(page);
+ /* Try to find the 1st mapped buffer. */
+ end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
+ blksize_bits;
+ if (!page_has_buffers(pages[0]))
+ goto out;
+ head = page_buffers(pages[0]);
+ if (!head)
+ goto out;
- if (!bh)
- return EXT_CONTINUE;
+ bh = head;
+ do {
+ if (buffer_mapped(bh)) {
+ /* get the 1st mapped buffer. */
+ if (end > newex->ec_block +
+ newex->ec_len)
+ /* The buffer is out of
+ * the request range.
+ */
+ goto out;
+ goto found_mapped_buffer;
+ }
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
- if (buffer_delay(bh)) {
- flags |= FIEMAP_EXTENT_DELALLOC;
- page_cache_release(page);
+ /* No mapped buffer found. */
+ goto out;
} else {
- page_cache_release(page);
- return EXT_CONTINUE;
+ /*Find contiguous delayed buffers. */
+ if (ret > 0 && pages[0]->index == last_offset)
+ head = page_buffers(pages[0]);
+ bh = head;
}
+
+found_mapped_buffer:
+ if (bh != NULL && buffer_delay(bh)) {
+ /* 1st or contiguous delayed buffer found. */
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /*
+ * 1st delayed buffer found, record
+ * the start of extent.
+ */
+ flags |= FIEMAP_EXTENT_DELALLOC;
+ newex->ec_block = end;
+ logical = (__u64)end << blksize_bits;
+ }
+ /* Find contiguous delayed buffers. */
+ do {
+ if (!buffer_delay(bh))
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+
+ for (index = 1; index < ret; index++) {
+ if (!page_has_buffers(pages[index])) {
+ bh = NULL;
+ break;
+ }
+ head = page_buffers(pages[index]);
+ if (!head) {
+ bh = NULL;
+ break;
+ }
+ if (pages[index]->index !=
+ pages[0]->index + index) {
+ /* Blocks are not contiguous. */
+ bh = NULL;
+ break;
+ }
+ bh = head;
+ do {
+ if (!buffer_delay(bh))
+ /* Delayed-extent ends. */
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+ }
+ } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
+ /* a hole found. */
+ goto out;
+
+found_delayed_extent:
+ newex->ec_len = min(end - newex->ec_block,
+ (ext4_lblk_t)EXT_INIT_MAX_LEN);
+ if (ret == nr_pages && bh != NULL &&
+ newex->ec_len < EXT_INIT_MAX_LEN &&
+ buffer_delay(bh)) {
+ /* Have not collected an extent and continue. */
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ goto repeat;
+ }
+
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ kfree(pages);
}
physical = (__u64)newex->ec_start << blksize_bits;
@@ -3822,32 +3955,16 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
if (ex && ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
- /*
- * If this extent reaches EXT_MAX_BLOCK, it must be last.
- *
- * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
- * this also indicates no more allocated blocks.
- *
- * XXX this might miss a single-block extent at EXT_MAX_BLOCK
- */
- if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
- newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
- loff_t size = i_size_read(inode);
- loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
-
+ size = i_size_read(inode);
+ if (logical + length >= size)
flags |= FIEMAP_EXTENT_LAST;
- if ((flags & FIEMAP_EXTENT_DELALLOC) &&
- logical+length > size)
- length = (size - logical + bs - 1) & ~(bs-1);
- }
- error = fiemap_fill_next_extent(fieinfo, logical, physical,
+ ret = fiemap_fill_next_extent(fieinfo, logical, physical,
length, flags);
- if (error < 0)
- return error;
- if (error == 1)
+ if (ret < 0)
+ return ret;
+ if (ret == 1)
return EXT_BREAK;
-
return EXT_CONTINUE;
}
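The rewritten callback scans dirty pagecache pages so that delayed-allocation
ranges are reported as FIEMAP_EXTENT_DELALLOC instead of being skipped. A
minimal userspace sketch that exercises this (the path is hypothetical, and
FIEMAP_FLAG_SYNC is deliberately left unset so delalloc extents survive):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>          /* FS_IOC_FIEMAP */
    #include <linux/fiemap.h>

    int main(void)
    {
        int fd = open("/mnt/ext4/testfile", O_RDONLY);  /* hypothetical */
        struct fiemap *fm = calloc(1, sizeof(*fm) +
                                   16 * sizeof(struct fiemap_extent));
        unsigned int i;

        if (fd < 0 || !fm)
            return 1;
        fm->fm_length = ~0ULL;          /* map the whole file */
        fm->fm_extent_count = 16;
        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
            return 1;
        for (i = 0; i < fm->fm_mapped_extents; i++) {
            struct fiemap_extent *fe = &fm->fm_extents[i];
            printf("%llu+%llu%s\n",
                   (unsigned long long)fe->fe_logical,
                   (unsigned long long)fe->fe_length,
                   (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ?
                       " (delalloc)" : "");
        }
        free(fm);
        close(fd);
        return 0;
    }
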
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 7829b28..e9473cb 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -101,7 +101,7 @@ extern int ext4_flush_completed_IO(struct inode *inode)
* to the work-to-be schedule is freed.
*
* Thus we need to keep the io structure still valid here after
- * convertion finished. The io structure has a flag to
+ * conversion finished. The io structure has a flag to
* avoid double converting from both fsync and background work
* queue work.
*/
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
* the parent directory's parent as well, and so on recursively, if
* they are also freshly created.
*/
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
{
+ struct writeback_control wbc;
struct dentry *dentry = NULL;
+ int ret = 0;
while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
break;
inode = dentry->d_parent->d_inode;
- sync_mapping_buffers(inode->i_mapping);
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (ret)
+ break;
+ memset(&wbc, 0, sizeof(wbc));
+ wbc.sync_mode = WB_SYNC_ALL;
+ wbc.nr_to_write = 0; /* only write out the inode */
+ ret = sync_inode(inode, &wbc);
+ if (ret)
+ break;
}
+ return ret;
}
/*
@@ -164,20 +175,20 @@ int ext4_sync_file(struct file *file, int datasync)
J_ASSERT(ext4_journal_current_handle() == NULL);
- trace_ext4_sync_file(file, datasync);
+ trace_ext4_sync_file_enter(file, datasync);
if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
ret = ext4_flush_completed_IO(inode);
if (ret < 0)
- return ret;
+ goto out;
if (!journal) {
ret = generic_file_fsync(file, datasync);
if (!ret && !list_empty(&inode->i_dentry))
- ext4_sync_parent(inode);
- return ret;
+ ret = ext4_sync_parent(inode);
+ goto out;
}
/*
@@ -194,8 +205,10 @@ int ext4_sync_file(struct file *file, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext4_should_journal_data(inode))
- return ext4_force_commit(inode->i_sb);
+ if (ext4_should_journal_data(inode)) {
+ ret = ext4_force_commit(inode->i_sb);
+ goto out;
+ }
commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
if (jbd2_log_start_commit(journal, commit_tid)) {
@@ -215,5 +228,7 @@ int ext4_sync_file(struct file *file, int datasync)
ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ out:
+ trace_ext4_sync_file_exit(inode, ret);
return ret;
}
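With ext4_sync_parent() now returning errors and writing the parent inode
itself (WB_SYNC_ALL, nr_to_write = 0), an fsync() on a freshly created file in
no-journal mode is meant to make the directory entry durable too, and any
failure propagates to the caller. The userspace pattern this serves
(hypothetical path):

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        int fd = open("/mnt/ext4/dir/newfile",  /* hypothetical */
                      O_CREAT | O_WRONLY, 0644);

        if (fd < 0 || write(fd, "data", 4) != 4)
            return 1;
        /*
         * In ext4's no-journal mode this also pushes out the new
         * directory entry via ext4_sync_parent(); the return value
         * now reflects failures of that write-out as well.
         */
        if (fsync(fd) != 0)
            perror("fsync");
        close(fd);
        return 0;
    }
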
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 78b79e1..21bb2f6 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -152,6 +152,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
* We do it here so the bitmap uptodate bit
* get set with buffer lock held.
*/
+ trace_ext4_load_inode_bitmap(sb, block_group);
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
@@ -649,7 +650,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
*group = parent_group + flex_size;
if (*group > ngroups)
*group = 0;
- return find_group_orlov(sb, parent, group, mode, 0);
+ return find_group_orlov(sb, parent, group, mode, NULL);
}
/*
@@ -1054,6 +1055,11 @@ got:
}
}
+ if (ext4_handle_valid(handle)) {
+ ei->i_sync_tid = handle->h_transaction->t_tid;
+ ei->i_datasync_tid = handle->h_transaction->t_tid;
+ }
+
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_std_error(sb, err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9f7f9e4..f2fa5e8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -173,7 +173,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
+ ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
@@ -720,7 +720,7 @@ allocated:
return ret;
failed_out:
for (i = 0; i < index; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
return ret;
}
@@ -823,20 +823,20 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
return err;
failed:
/* Allocation failed, free what we already allocated */
- ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
for (i = 1; i <= n ; i++) {
/*
* branch[i].bh is newly allocated, so there is no
* need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA.
*/
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
EXT4_FREE_BLOCKS_FORGET);
}
for (i = n+1; i < indirect_blks; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
- ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
return err;
}
@@ -924,7 +924,7 @@ err_out:
ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
EXT4_FREE_BLOCKS_FORGET);
}
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
blks, 0);
return err;
@@ -973,6 +973,7 @@ static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
int count = 0;
ext4_fsblk_t first_block = 0;
+ trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
depth = ext4_block_to_path(inode, map->m_lblk, offsets,
@@ -1058,6 +1059,8 @@ cleanup:
partial--;
}
out:
+ trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
+ map->m_pblk, map->m_len, err);
return err;
}
@@ -2060,7 +2063,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
- int commit_write = 0, redirty_page = 0;
+ int commit_write = 0, skip_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
@@ -2086,14 +2089,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
* If the page does not have buffers (for
* whatever reason), try to create them using
* __block_write_begin. If this fails,
- * redirty the page and move on.
+ * skip the page and move on.
*/
if (!page_has_buffers(page)) {
if (__block_write_begin(page, 0, len,
noalloc_get_block_write)) {
- redirty_page:
- redirty_page_for_writepage(mpd->wbc,
- page);
+ skip_page:
unlock_page(page);
continue;
}
@@ -2104,7 +2105,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
block_start = 0;
do {
if (!bh)
- goto redirty_page;
+ goto skip_page;
if (map && (cur_logical >= map->m_lblk) &&
(cur_logical <= (map->m_lblk +
(map->m_len - 1)))) {
@@ -2120,22 +2121,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh);
}
- /* redirty page if block allocation undone */
+ /* skip page if block allocation undone */
if (buffer_delay(bh) || buffer_unwritten(bh))
- redirty_page = 1;
+ skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
cur_logical++;
pblock++;
} while (bh != page_bufs);
- if (redirty_page)
- goto redirty_page;
+ if (skip_page)
+ goto skip_page;
if (commit_write)
/* mark the buffer_heads as dirty & uptodate */
block_commit_write(page, 0, len);
+ clear_page_dirty_for_io(page);
/*
* Delalloc doesn't support data journalling,
* but eventually maybe we'll lift this
@@ -2165,8 +2167,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
return ret;
}
-static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
- sector_t logical, long blk_cnt)
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
int nr_pages, i;
pgoff_t index, end;
@@ -2174,9 +2175,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
- index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- end = (logical + blk_cnt - 1) >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
@@ -2279,9 +2279,8 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
err = blks;
/*
* If get block returns EAGAIN or ENOSPC and there
- * appears to be free blocks we will call
- * ext4_writepage() for all of the pages which will
- * just redirty the pages.
+ * appears to be free blocks we will just let
+ * mpage_da_submit_io() unlock all of the pages.
*/
if (err == -EAGAIN)
goto submit_io;
@@ -2312,8 +2311,10 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
ext4_print_free_blocks(mpd->inode);
}
/* invalidate all the pages */
- ext4_da_block_invalidatepages(mpd, next,
- mpd->b_size >> mpd->inode->i_blkbits);
+ ext4_da_block_invalidatepages(mpd);
+
+ /* Mark this page range as having been completed */
+ mpd->io_done = 1;
return;
}
BUG_ON(blks == 0);
@@ -2438,102 +2439,6 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
}
/*
- * __mpage_da_writepage - finds extent of pages and blocks
- *
- * @page: page to consider
- * @wbc: not used, we just follow rules
- * @data: context
- *
- * The function finds extents of pages and scan them for all blocks.
- */
-static int __mpage_da_writepage(struct page *page,
- struct writeback_control *wbc,
- struct mpage_da_data *mpd)
-{
- struct inode *inode = mpd->inode;
- struct buffer_head *bh, *head;
- sector_t logical;
-
- /*
- * Can we merge this page to current extent?
- */
- if (mpd->next_page != page->index) {
- /*
- * Nope, we can't. So, we map non-allocated blocks
- * and start IO on them
- */
- if (mpd->next_page != mpd->first_page) {
- mpage_da_map_and_submit(mpd);
- /*
- * skip rest of the page in the page_vec
- */
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return MPAGE_DA_EXTENT_TAIL;
- }
-
- /*
- * Start next extent of pages ...
- */
- mpd->first_page = page->index;
-
- /*
- * ... and blocks
- */
- mpd->b_size = 0;
- mpd->b_state = 0;
- mpd->b_blocknr = 0;
- }
-
- mpd->next_page = page->index + 1;
- logical = (sector_t) page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- if (!page_has_buffers(page)) {
- mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
- (1 << BH_Dirty) | (1 << BH_Uptodate));
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else {
- /*
- * Page with regular buffer heads, just add all dirty ones
- */
- head = page_buffers(page);
- bh = head;
- do {
- BUG_ON(buffer_locked(bh));
- /*
- * We need to try to allocate
- * unmapped blocks in the same page.
- * Otherwise we won't make progress
- * with the page in ext4_writepage
- */
- if (ext4_bh_delay_or_unwritten(NULL, bh)) {
- mpage_add_bh_to_extent(mpd, logical,
- bh->b_size,
- bh->b_state);
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
- /*
- * mapped dirty buffer. We need to update
- * the b_state because we look at
- * b_state in mpage_da_map_blocks. We don't
- * update b_size because if we find an
- * unmapped buffer_head later we need to
- * use the b_state flag of that buffer_head.
- */
- if (mpd->b_size == 0)
- mpd->b_state = bh->b_state & BH_FLAGS;
- }
- logical++;
- } while ((bh = bh->b_this_page) != head);
- }
-
- return 0;
-}
-
-/*
* This is a special get_blocks_t callback which is used by
* ext4_da_write_begin(). It will either return mapped block or
* reserve space for a single block.
@@ -2684,7 +2589,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* because we should have holes filled from ext4_page_mkwrite(). We even don't
* need to file the inode to the transaction's list in ordered mode because if
* we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
+ * we are writing back data modified via mmap(), no one guarantees in which
* transaction the data will hit the disk. In case we are journaling data, we
* cannot start transaction directly because transaction start ranks above page
* lock so we have to do some magic.
@@ -2786,7 +2691,7 @@ static int ext4_writepage(struct page *page,
/*
* This is called via ext4_da_writepages() to
- * calulate the total number of credits to reserve to fit
+ * calculate the total number of credits to reserve to fit
* a single extent allocation into a single transaction,
* ext4_da_writpeages() will loop calling this before
* the block allocation.
@@ -2811,27 +2716,27 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
/*
* write_cache_pages_da - walk the list of dirty pages of the given
- * address space and call the callback function (which usually writes
- * the pages).
- *
- * This is a forked version of write_cache_pages(). Differences:
- * Range cyclic is ignored.
- * no_nrwrite_index_update is always presumed true
+ * address space and accumulate pages that need writing, and call
+ * mpage_da_map_and_submit to map a single contiguous memory region
+ * and then write them.
*/
static int write_cache_pages_da(struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd,
pgoff_t *done_index)
{
- int ret = 0;
- int done = 0;
- struct pagevec pvec;
- unsigned nr_pages;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- long nr_to_write = wbc->nr_to_write;
- int tag;
-
+ struct buffer_head *bh, *head;
+ struct inode *inode = mapping->host;
+ struct pagevec pvec;
+ unsigned int nr_pages;
+ sector_t logical;
+ pgoff_t index, end;
+ long nr_to_write = wbc->nr_to_write;
+ int i, tag, ret = 0;
+
+ memset(mpd, 0, sizeof(struct mpage_da_data));
+ mpd->wbc = wbc;
+ mpd->inode = inode;
pagevec_init(&pvec, 0);
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
@@ -2842,13 +2747,11 @@ static int write_cache_pages_da(struct address_space *mapping,
tag = PAGECACHE_TAG_DIRTY;
*done_index = index;
- while (!done && (index <= end)) {
- int i;
-
+ while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
- break;
+ return 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2860,60 +2763,100 @@ static int write_cache_pages_da(struct address_space *mapping,
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
- if (page->index > end) {
- done = 1;
- break;
- }
+ if (page->index > end)
+ goto out;
*done_index = page->index + 1;
+ /*
+ * If we can't merge this page, and we have
+ * accumulated an contiguous region, write it
+ */
+ if ((mpd->next_page != page->index) &&
+ (mpd->next_page != mpd->first_page)) {
+ mpage_da_map_and_submit(mpd);
+ goto ret_extent_tail;
+ }
+
lock_page(page);
/*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
+ * If the page is no longer dirty, or its
+ * mapping no longer corresponds to inode we
+ * are writing (which means it has been
+ * truncated or invalidated), or the page is
+ * already under writeback and we are not
+ * doing a data integrity writeback, skip the page
*/
- if (unlikely(page->mapping != mapping)) {
-continue_unlock:
+ if (!PageDirty(page) ||
+ (PageWriteback(page) &&
+ (wbc->sync_mode == WB_SYNC_NONE)) ||
+ unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
-
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- else
- goto continue_unlock;
- }
+ if (PageWriteback(page))
+ wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
- goto continue_unlock;
- ret = __mpage_da_writepage(page, wbc, mpd);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- done = 1;
- break;
- }
+ if (mpd->next_page != page->index)
+ mpd->first_page = page->index;
+ mpd->next_page = page->index + 1;
+ logical = (sector_t) page->index <<
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+ if (!page_has_buffers(page)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ PAGE_CACHE_SIZE,
+ (1 << BH_Dirty) | (1 << BH_Uptodate));
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else {
+ /*
+ * Page with regular buffer heads,
+ * just add all dirty ones
+ */
+ head = page_buffers(page);
+ bh = head;
+ do {
+ BUG_ON(buffer_locked(bh));
+ /*
+ * We need to try to allocate
+ * unmapped blocks in the same page.
+ * Otherwise we won't make progress
+ * with the page in ext4_writepage
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ bh->b_size,
+ bh->b_state);
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+ /*
+ * mapped dirty buffer. We need
+ * to update the b_state
+ * because we look at b_state
+ * in mpage_da_map_blocks. We
+ * don't update b_size because
+ * if we find an unmapped
+ * buffer_head later we need to
+ * use the b_state flag of that
+ * buffer_head.
+ */
+ if (mpd->b_size == 0)
+ mpd->b_state = bh->b_state & BH_FLAGS;
+ }
+ logical++;
+ } while ((bh = bh->b_this_page) != head);
}
if (nr_to_write > 0) {
nr_to_write--;
if (nr_to_write == 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ wbc->sync_mode == WB_SYNC_NONE)
/*
* We stop writing back only if we are
* not doing integrity sync. In case of
@@ -2924,14 +2867,18 @@ continue_unlock:
* pages, but have not synced all of the
* old dirty pages.
*/
- done = 1;
- break;
- }
+ goto out;
}
}
pagevec_release(&pvec);
cond_resched();
}
+ return 0;
+ret_extent_tail:
+ ret = MPAGE_DA_EXTENT_TAIL;
+out:
+ pagevec_release(&pvec);
+ cond_resched();
return ret;
}
@@ -2945,7 +2892,6 @@ static int ext4_da_writepages(struct address_space *mapping,
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int pages_written = 0;
- long pages_skipped;
unsigned int max_pages;
int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0;
@@ -3028,11 +2974,6 @@ static int ext4_da_writepages(struct address_space *mapping,
wbc->nr_to_write = desired_nr_to_write;
}
- mpd.wbc = wbc;
- mpd.inode = mapping->host;
-
- pages_skipped = wbc->pages_skipped;
-
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
@@ -3059,22 +3000,10 @@ retry:
}
/*
- * Now call __mpage_da_writepage to find the next
+ * Now call write_cache_pages_da() to find the next
* contiguous region of logical blocks that need
- * blocks to be allocated by ext4. We don't actually
- * submit the blocks for I/O here, even though
- * write_cache_pages thinks it will, and will set the
- * pages as clean for write before calling
- * __mpage_da_writepage().
+ * blocks to be allocated by ext4 and submit them.
*/
- mpd.b_size = 0;
- mpd.b_state = 0;
- mpd.b_blocknr = 0;
- mpd.first_page = 0;
- mpd.next_page = 0;
- mpd.io_done = 0;
- mpd.pages_written = 0;
- mpd.retval = 0;
ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
@@ -3096,7 +3025,6 @@ retry:
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
- wbc->pages_skipped = pages_skipped;
ret = 0;
} else if (ret == MPAGE_DA_EXTENT_TAIL) {
/*
@@ -3104,7 +3032,6 @@ retry:
* rest of the pages
*/
pages_written += mpd.pages_written;
- wbc->pages_skipped = pages_skipped;
ret = 0;
io_done = 1;
} else if (wbc->nr_to_write)
@@ -3122,11 +3049,6 @@ retry:
wbc->range_end = mapping->writeback_index - 1;
goto retry;
}
- if (pages_skipped != wbc->pages_skipped)
- ext4_msg(inode->i_sb, KERN_CRIT,
- "This should not happen leaving %s "
- "with nr_to_write = %ld ret = %d",
- __func__, wbc->nr_to_write, ret);
/* Update index */
wbc->range_cyclic = range_cyclic;
@@ -3383,7 +3305,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
* the pages by calling redirty_page_for_writepage() but that
* would be ugly in the extreme. So instead we would need to
* replicate parts of the code in the above functions,
- * simplifying them becuase we wouldn't actually intend to
+ * simplifying them because we wouldn't actually intend to
* write out the pages, but rather only collect contiguous
* logical block extents, call the multi-block allocator, and
* then update the buffer heads with the block allocations.
@@ -3460,6 +3382,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
static int ext4_readpage(struct file *file, struct page *page)
{
+ trace_ext4_readpage(page);
return mpage_readpage(page, ext4_get_block);
}
@@ -3494,6 +3417,8 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ trace_ext4_invalidatepage(page, offset);
+
/*
* free any io_end structure allocated for buffers to be discarded
*/
@@ -3515,6 +3440,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ trace_ext4_releasepage(page);
+
WARN_ON(PageChecked(page));
if (!page_has_buffers(page))
return 0;
@@ -3768,7 +3695,7 @@ retry:
*
* The unwrritten extents will be converted to written when DIO is completed.
* For async direct IO, since the IO may still pending when return, we
- * set up an end_io call back function, which will do the convertion
+ * set up an end_io call back function, which will do the conversion
* when async direct IO completed.
*
* If the O_DIRECT write will extend the file then add this inode to the
@@ -3791,7 +3718,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
* We could direct write to holes and fallocate.
*
* Allocated blocks to fill the hole are marked as uninitialized
- * to prevent paralel buffered read to expose the stale data
+ * to prevent parallel buffered read to expose the stale data
* before DIO complete the data IO.
*
* As to previously fallocated extents, ext4 get_block
@@ -3852,7 +3779,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
int err;
/*
* for non AIO case, since the IO is already
- * completed, we could do the convertion right here
+ * completed, we could do the conversion right here
*/
err = ext4_convert_unwritten_extents(inode,
offset, ret);
@@ -3873,11 +3800,16 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+ trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
-
- return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+ else
+ ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ trace_ext4_direct_IO_exit(inode, offset,
+ iov_length(iov, nr_segs), rw, ret);
+ return ret;
}
/*
@@ -3903,7 +3835,6 @@ static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end,
.bmap = ext4_bmap,
@@ -3919,7 +3850,6 @@ static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end,
.bmap = ext4_bmap,
@@ -3935,7 +3865,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3951,7 +3880,6 @@ static const struct address_space_operations ext4_da_aops = {
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_da_writepages,
- .sync_page = block_sync_page,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
@@ -4098,7 +4026,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is refered
+ * partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
@@ -4177,6 +4105,9 @@ no_top:
*
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
+ *
+ * Return 0 on success, 1 on invalid block range
+ * and < 0 on fatal error.
*/
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh,
@@ -4203,33 +4134,32 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
}
err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
err = ext4_truncate_restart_trans(handle, inode,
blocks_for_truncate(inode));
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
if (bh) {
BUFFER_TRACE(bh, "retaking write access");
- ext4_journal_get_write_access(handle, bh);
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err))
+ goto out_err;
}
}
for (p = first; p < last; p++)
*p = 0;
- ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
+ ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
return 0;
+out_err:
+ ext4_std_error(inode->i_sb, err);
+ return err;
}
/**
@@ -4240,7 +4170,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
* @first: array of block numbers
* @last: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are stored as
+ * We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -4263,7 +4193,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
ext4_fsblk_t nr; /* Current block # */
__le32 *p; /* Pointer into inode/ind
for current block */
- int err;
+ int err = 0;
if (this_bh) { /* For indirect block */
BUFFER_TRACE(this_bh, "get_write_access");
@@ -4285,9 +4215,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
} else if (nr == block_to_free + count) {
count++;
} else {
- if (ext4_clear_blocks(handle, inode, this_bh,
- block_to_free, count,
- block_to_free_p, p))
+ err = ext4_clear_blocks(handle, inode, this_bh,
+ block_to_free, count,
+ block_to_free_p, p);
+ if (err)
break;
block_to_free = nr;
block_to_free_p = p;
@@ -4296,9 +4227,12 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
}
}
- if (count > 0)
- ext4_clear_blocks(handle, inode, this_bh, block_to_free,
- count, block_to_free_p, p);
+ if (!err && count > 0)
+ err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
+ count, block_to_free_p, p);
+ if (err < 0)
+ /* fatal error */
+ return;
if (this_bh) {
BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
@@ -4328,7 +4262,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
@@ -4416,7 +4350,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* transaction where the data blocks are
* actually freed.
*/
- ext4_free_blocks(handle, inode, 0, nr, 1,
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
EXT4_FREE_BLOCKS_METADATA|
EXT4_FREE_BLOCKS_FORGET);
@@ -4496,10 +4430,12 @@ void ext4_truncate(struct inode *inode)
Indirect chain[4];
Indirect *partial;
__le32 nr = 0;
- int n;
- ext4_lblk_t last_block;
+ int n = 0;
+ ext4_lblk_t last_block, max_block;
unsigned blocksize = inode->i_sb->s_blocksize;
+ trace_ext4_truncate_enter(inode);
+
if (!ext4_can_truncate(inode))
return;
@@ -4510,6 +4446,7 @@ void ext4_truncate(struct inode *inode)
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ext4_ext_truncate(inode);
+ trace_ext4_truncate_exit(inode);
return;
}
@@ -4519,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
last_block = (inode->i_size + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
if (inode->i_size & (blocksize - 1))
if (ext4_block_truncate_page(handle, mapping, inode->i_size))
goto out_stop;
- n = ext4_block_to_path(inode, last_block, offsets, NULL);
- if (n == 0)
- goto out_stop; /* error */
+ if (last_block != max_block) {
+ n = ext4_block_to_path(inode, last_block, offsets, NULL);
+ if (n == 0)
+ goto out_stop; /* error */
+ }
/*
* OK. This truncate is going to happen. We add the inode to the
@@ -4557,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
*/
ei->i_disksize = inode->i_size;
- if (n == 1) { /* direct blocks */
+ if (last_block == max_block) {
+ /*
+ * It is unnecessary to free any data blocks if last_block is
+ * equal to the indirect block limit.
+ */
+ goto out_unlock;
+ } else if (n == 1) { /* direct blocks */
ext4_free_data(handle, inode, NULL, i_data+offsets[0],
i_data + EXT4_NDIR_BLOCKS);
goto do_indirects;
@@ -4617,6 +4564,7 @@ do_indirects:
;
}
+out_unlock:
up_write(&ei->i_data_sem);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
@@ -4639,6 +4587,7 @@ out_stop:
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
+ trace_ext4_truncate_exit(inode);
}
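max_block is the highest block the indirect format can address at all. Worked
out for 4 KiB blocks (1024 block numbers per indirect block): 12 direct +
1024 + 1024^2 + 1024^3, roughly 1.07e9 addressable blocks (about 4 TiB;
s_bitmap_maxbytes may be capped lower still by the i_blocks sector limit when
huge_file is off). When last_block == max_block, i_size already sits at that
ceiling, so no data block can exist beyond it and the freeing walk is skipped.
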
/*
@@ -4770,6 +4719,7 @@ make_io:
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
+ trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ_META, bh);
@@ -4875,7 +4825,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
return inode;
ei = EXT4_I(inode);
- iloc.bh = 0;
+ iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
@@ -5460,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
/* if nrblocks are contiguous */
if (chunk) {
/*
- * With N contiguous data blocks, it need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
- * 2 dindirect blocks
- * 1 tindirect block
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
*/
- indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
- return indirects + 3;
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
/*
* if nrblocks are not contiguous, worse case, each block touch
@@ -5540,7 +5489,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
}
/*
- * Calulate the total number of credits to reserve to fit
+ * Calculate the total number of credits to reserve to fit
* the modification of a single pages into a single transaction,
* which may include multiple chunks of block allocations.
*
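The old contiguous-chunk estimate in ext4_indirect_trans_blocks() both
truncated the division and dropped a block. Worked example with 4 KiB blocks
(EXT4_ADDR_PER_BLOCK == 1024): a contiguous chunk of 1025 blocks spans two
indirect leaves, yet the old formula reserved 1025/1024 + 3 = 4 credits; the
new one reserves DIV_ROUND_UP(1025, 1024) + 4 = 6, covering the leaves plus
the extra indirect, dindirect and tindirect blocks the new comment lists.
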
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index eb3bc2f..808c554 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int oldflags;
unsigned int jflag;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -146,7 +146,7 @@ flags_out:
__u32 generation;
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
err = mnt_want_write(filp->f_path.mnt);
@@ -298,7 +298,7 @@ mext_out:
case EXT4_IOC_MIGRATE:
{
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = mnt_want_write(filp->f_path.mnt);
@@ -320,7 +320,7 @@ mext_out:
case EXT4_IOC_ALLOC_DA_BLKS:
{
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = mnt_want_write(filp->f_path.mnt);
@@ -334,16 +334,22 @@ mext_out:
case FITRIM:
{
struct super_block *sb = inode->i_sb;
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&range, (struct fstrim_range *)arg,
sizeof(range)))
return -EFAULT;
+ range.minlen = max((unsigned int)range.minlen,
+ q->limits.discard_granularity);
ret = ext4_trim_fs(sb, &range);
if (ret < 0)
return ret;
@@ -421,6 +427,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
case EXT4_IOC_MOVE_EXT:
+ case FITRIM:
break;
default:
return -ENOIOCTLCMD;
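Two behavioural changes for FITRIM here: devices without discard support now
fail early with EOPNOTSUPP, and a too-small minlen is silently raised to the
device's discard granularity (plus FITRIM becomes reachable from 32-bit
userland via compat_ioctl). A minimal userspace caller (the mount point is
hypothetical):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>       /* FITRIM, struct fstrim_range */

    int main(void)
    {
        struct fstrim_range range;
        int fd = open("/mnt/ext4", O_RDONLY);   /* hypothetical */

        if (fd < 0)
            return 1;
        memset(&range, 0, sizeof(range));
        range.len = ~0ULL;      /* trim the whole filesystem */
        range.minlen = 4096;    /* raised to discard_granularity if smaller */
        if (ioctl(fd, FITRIM, &range) < 0)
            perror("FITRIM");   /* EOPNOTSUPP if the device can't discard */
        close(fd);
        return 0;
    }
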
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d1fe09a..d8a16ee 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -92,7 +92,7 @@
* between CPUs. It is possible to get scheduled at this point.
*
* The locality group prealloc space is used looking at whether we have
- * enough free space (pa_free) withing the prealloc space.
+ * enough free space (pa_free) within the prealloc space.
*
* If we can't allocate blocks via inode prealloc or/and locality group
* prealloc then we look at the buddy cache. The buddy cache is represented
@@ -432,9 +432,10 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
}
/* at order 0 we see each particular block */
- *max = 1 << (e4b->bd_blkbits + 3);
- if (order == 0)
+ if (order == 0) {
+ *max = 1 << (e4b->bd_blkbits + 3);
return EXT4_MB_BITMAP(e4b);
+ }
bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
@@ -616,7 +617,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
grp = ext4_get_group_info(sb, e4b->bd_group);
- buddy = mb_find_buddy(e4b, 0, &max);
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
@@ -635,7 +635,12 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
#define mb_check_buddy(e4b)
#endif
-/* FIXME!! need more doc */
+/*
+ * Divide blocks started from @first with length @len into
+ * smaller chunks with power of 2 blocks.
+ * Clear the bits in bitmap which the blocks of the chunk(s) covered,
+ * then increase bb_counters[] for corresponded chunk size.
+ */
static void ext4_mb_mark_free_simple(struct super_block *sb,
void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
struct ext4_group_info *grp)
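The decomposition the new comment describes can be shown standalone: split
[first, first + len) into the largest aligned power-of-2 chunks, one counter
bump per chunk. An illustrative userspace sketch (not the kernel code, which
also clears the corresponding bitmap bits):

    #include <stdio.h>

    static void mark_free_simple_demo(unsigned int first, unsigned int len)
    {
        while (len > 0) {
            unsigned int order = 0;

            /* grow the chunk while it stays aligned and still fits */
            while (!(first & ((2u << order) - 1)) && (2u << order) <= len)
                order++;
            printf("chunk at %u, order %u (%u blocks)\n",
                   first, order, 1u << order);
            first += 1u << order;
            len -= 1u << order;
        }
    }

    int main(void)
    {
        mark_free_simple_demo(13, 6);   /* orders 0,1,1,0: 1+2+2+1 blocks */
        return 0;
    }
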
@@ -2381,7 +2386,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
* kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
* So a two level scheme suffices for now. */
- sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
+ sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
if (sbi->s_group_info == NULL) {
printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
return -ENOMEM;
@@ -3208,7 +3213,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
cur_distance = abs(goal_block - cpa->pa_pstart);
new_distance = abs(goal_block - pa->pa_pstart);
- if (cur_distance < new_distance)
+ if (cur_distance <= new_distance)
return cpa;
/* drop the previous reference */
@@ -3907,7 +3912,8 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
struct super_block *sb = ac->ac_sb;
ext4_group_t ngroups, i;
- if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ if (!mb_enable_debug ||
+ (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
printk(KERN_ERR "EXT4-fs: Can't allocate:"
@@ -4753,7 +4759,8 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
* bitmap. Then issue a TRIM command on this extent and free the extent in
* the group buddy bitmap. This is done until whole group is scanned.
*/
-ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
+static ext4_grpblk_t
+ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
{
void *bitmap;
@@ -4863,10 +4870,15 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
break;
}
- if (len >= EXT4_BLOCKS_PER_GROUP(sb))
- len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
- else
+ /*
+ * For all the groups except the last one, last block will
+ * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to
+ * change it for the last group in which case start +
+ * len < EXT4_BLOCKS_PER_GROUP(sb).
+ */
+ if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb))
last_block = first_block + len;
+ len -= last_block - first_block;
if (e4b.bd_info->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, &e4b, first_block,
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b619322..22bd4d7 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -169,7 +169,7 @@ struct ext4_allocation_context {
/* original request */
struct ext4_free_extent ac_o_ex;
- /* goal request (after normalization) */
+ /* goal request (normalized ac_o_ex) */
struct ext4_free_extent ac_g_ex;
/* the best found extent */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b0a126f..92816b4 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -263,7 +263,7 @@ static int free_dind_blocks(handle_t *handle,
for (i = 0; i < max_entries; i++) {
if (tmp_idata[i]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(tmp_idata[i]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -271,7 +271,7 @@ static int free_dind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -302,7 +302,7 @@ static int free_tind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -315,7 +315,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
/* ei->i_data[EXT4_IND_BLOCK] */
if (i_data[0]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(i_data[0]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -428,7 +428,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return retval;
}
@@ -517,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
* start with one credit accounted for
* superblock modification.
*
- * For the tmp_inode we already have commited the
+ * For the tmp_inode we already have committed the
* transaction that created the inode. Later, as and
* when we add extents we extend the journal
*/
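These migrate.c hunks replace a literal 0 with NULL for ext4_free_blocks()'s buffer_head pointer. A minimal standalone illustration of why that is preferred (free_blocks is a hypothetical stand-in, not the kernel function): both forms compile, but NULL documents intent and keeps static checkers such as sparse from warning about a plain integer used as a null pointer.

#include <stddef.h>
#include <stdio.h>

static void free_blocks(void *bh, unsigned long block, int count)
{
    printf("free %d block(s) at %lu, bh=%p\n", count, block, bh);
}

int main(void)
{
    free_blocks(NULL, 1234, 1);  /* preferred: explicit null pointer */
    free_blocks(0, 1234, 1);     /* legal C, but reads as an integer */
    return 0;
}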
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e781b7e..67fd0b0 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -40,6 +40,7 @@
#include "xattr.h"
#include "acl.h"
+#include <trace/events/ext4.h>
/*
* define how far ahead to read directories while searching them.
*/
@@ -2183,6 +2184,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
struct ext4_dir_entry_2 *de;
handle_t *handle;
+ trace_ext4_unlink_enter(dir, dentry);
/* Initialize quotas before so that eventual writes go
* in separate transaction */
dquot_initialize(dir);
@@ -2228,6 +2230,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
end_unlink:
ext4_journal_stop(handle);
brelse(bh);
+ trace_ext4_unlink_exit(dentry, retval);
return retval;
}
@@ -2402,6 +2405,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!new_inode && new_dir != old_dir &&
EXT4_DIR_LINK_MAX(new_dir))
goto end_rename;
+ BUFFER_TRACE(dir_bh, "get_write_access");
+ retval = ext4_journal_get_write_access(handle, dir_bh);
+ if (retval)
+ goto end_rename;
}
if (!new_bh) {
retval = ext4_add_entry(handle, new_dentry, old_inode);
@@ -2409,7 +2416,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
} else {
BUFFER_TRACE(new_bh, "get write access");
- ext4_journal_get_write_access(handle, new_bh);
+ retval = ext4_journal_get_write_access(handle, new_bh);
+ if (retval)
+ goto end_rename;
new_de->inode = cpu_to_le32(old_inode->i_ino);
if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
EXT4_FEATURE_INCOMPAT_FILETYPE))
@@ -2470,8 +2479,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
- BUFFER_TRACE(dir_bh, "get_write_access");
- ext4_journal_get_write_access(handle, dir_bh);
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
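The rename hunks above enforce a journaling rule: obtain write access to a buffer, and check the result, before modifying it rather than after. A hedged userspace sketch of the pattern, with all names and the -EROFS failure mode standing in for the kernel API:

#include <stdio.h>

struct buffer { int data; };

static int journal_get_write_access(struct buffer *bh)
{
    /* pretend the journal may fail, e.g. because it was aborted */
    return bh ? 0 : -30; /* -EROFS */
}

static int rename_step(struct buffer *dir_bh, int new_parent_ino)
{
    int retval = journal_get_write_access(dir_bh);

    if (retval)
        return retval;             /* "goto end_rename" in the patch */
    dir_bh->data = new_parent_ino; /* safe to modify only now */
    return 0;
}

int main(void)
{
    struct buffer bh = { 0 };

    printf("ok path: %d\n", rename_step(&bh, 42));
    printf("error path: %d\n", rename_step(NULL, 42));
    return 0;
}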
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc30..b6dbd05 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -259,6 +259,11 @@ static void ext4_end_bio(struct bio *bio, int error)
bi_sector >> (inode->i_blkbits - 9));
}
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ return;
+ }
+
/* Add the io_end to per-inode completed io list*/
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
@@ -279,9 +284,9 @@ void ext4_io_submit(struct ext4_io_submit *io)
BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
bio_put(io->io_bio);
}
- io->io_bio = 0;
+ io->io_bio = NULL;
io->io_op = 0;
- io->io_end = 0;
+ io->io_end = NULL;
}
static int io_submit_init(struct ext4_io_submit *io,
@@ -310,8 +315,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio;
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
@@ -381,8 +385,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- ClearPageError(page);
io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
if (!io_page) {
@@ -393,6 +395,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
io_page->p_page = page;
atomic_set(&io_page->p_count, 1);
get_page(page);
+ set_page_writeback(page);
+ ClearPageError(page);
for (bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
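The last page-io.c hunk reorders state changes around an allocation: the page must not be flagged writeback until the io_page tracking structure exists, or the -ENOMEM path would leave the page stuck in writeback. A small sketch of the ordering, with made-up types:

#include <stdio.h>
#include <stdlib.h>

struct page { int writeback; };

static int bio_write_page(struct page *page, int alloc_fails)
{
    void *io_page = alloc_fails ? NULL : malloc(16);

    if (!io_page)
        return -12;      /* -ENOMEM, page state untouched */
    page->writeback = 1; /* set_page_writeback() moved here */
    free(io_page);
    return 0;
}

int main(void)
{
    struct page p = { 0 };

    printf("fail: ret=%d writeback=%d\n", bio_write_page(&p, 1), p.writeback);
    printf("ok:   ret=%d writeback=%d\n", bio_write_page(&p, 0), p.writeback);
    return 0;
}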
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3ecc6e4..80bbc9c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -230,7 +230,7 @@ static int setup_new_group_blocks(struct super_block *sb,
}
/* Zero out all of the reserved backup group descriptor table blocks */
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
GFP_NOFS);
@@ -248,7 +248,7 @@ static int setup_new_group_blocks(struct super_block *sb,
/* Zero out all of the inode table blocks */
block = input->inode_table;
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
if (err)
@@ -499,12 +499,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
- /* ext4_journal_release_buffer(handle, iloc.bh); */
+ /* ext4_handle_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
- /* ext4_journal_release_buffer(handle, dind); */
+ /* ext4_handle_release_buffer(handle, dind); */
exit_sbh:
- /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
+ /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
brelse(dind);
exit_bh:
@@ -586,7 +586,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
/*
int j;
for (j = 0; j < i; j++)
- ext4_journal_release_buffer(handle, primary[j]);
+ ext4_handle_release_buffer(handle, primary[j]);
*/
goto exit_bh;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 203f9e4..8553dfb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -54,9 +54,9 @@
static struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
-struct ext4_lazy_init *ext4_li_info;
-struct mutex ext4_li_mtx;
-struct ext4_features *ext4_feat;
+static struct ext4_lazy_init *ext4_li_info;
+static struct mutex ext4_li_mtx;
+static struct ext4_features *ext4_feat;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
+static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
@@ -241,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
+ *
+ * To avoid holding the journal's j_barrier while the filesystem is
+ * frozen from userspace, ext4 relies on s_frozen, which lives in an
+ * upper layer, to prevent new handles from being started.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
journal_t *journal;
+ handle_t *handle;
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
- vfs_check_frozen(sb, SB_FREEZE_TRANS);
- /* Special case here: if the journal has aborted behind our
- * backs (eg. EIO in the commit thread), then we still need to
- * take the FS itself readonly cleanly. */
journal = EXT4_SB(sb)->s_journal;
- if (journal) {
- if (is_journal_aborted(journal)) {
- ext4_abort(sb, "Detected aborted journal");
- return ERR_PTR(-EROFS);
- }
- return jbd2_journal_start(journal, nblocks);
+ handle = ext4_journal_current_handle();
+
+ /*
+ * If a handle has been started, it should be allowed to
+ * finish; otherwise a deadlock could occur between freeze
+ * and others (e.g. truncate) due to the restart of the
+ * journal handle if the filesystem is frozen and active
+ * handles are not stopped.
+ */
+ if (!handle)
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+ if (!journal)
+ return ext4_get_nojournal();
+ /*
+ * Special case here: if the journal has aborted behind our
+ * backs (eg. EIO in the commit thread), then we still need to
+ * take the FS itself readonly cleanly.
+ */
+ if (is_journal_aborted(journal)) {
+ ext4_abort(sb, "Detected aborted journal");
+ return ERR_PTR(-EROFS);
}
- return ext4_get_nojournal();
+ return jbd2_journal_start(journal, nblocks);
}
/*
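A compressed sketch of the decision order the rewritten ext4_journal_start_sb() follows. struct sb, the flags, and the return codes are stand-ins; in the kernel the frozen case sleeps in vfs_check_frozen() rather than returning an error, and only a brand-new handle is allowed to block there:

#include <stdio.h>

enum { EROFS = 30 };

struct sb { int rdonly, frozen, has_journal, journal_aborted; };

static int journal_start(struct sb *s, int current_handle_active)
{
    if (s->rdonly)
        return -EROFS;
    /*
     * Only a brand-new handle may block on a frozen fs; a nested
     * start must be allowed to finish, or freeze vs. truncate can
     * deadlock when the handle is restarted.
     */
    if (!current_handle_active && s->frozen)
        return -1;       /* would wait in vfs_check_frozen() */
    if (!s->has_journal)
        return 1;        /* ext4_get_nojournal() case */
    if (s->journal_aborted)
        return -EROFS;   /* take the fs readonly cleanly */
    return 0;            /* jbd2_journal_start() */
}

int main(void)
{
    struct sb s = { 0, 1, 1, 0 };

    printf("new handle on frozen fs: %d\n", journal_start(&s, 0));
    printf("nested handle on frozen fs: %d\n", journal_start(&s, 1));
    return 0;
}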
@@ -594,7 +612,7 @@ __acquires(bitlock)
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
sb->s_id, function, line, grp);
if (ino)
printk(KERN_CONT "inode %lu: ", ino);
@@ -616,7 +634,7 @@ __acquires(bitlock)
* filesystem will have already been marked read/only and the
* journal has been aborted. We return 1 as a hint to callers
* who might want to use the return value from
- * ext4_grp_locked_error() to distinguish beween the
+ * ext4_grp_locked_error() to distinguish between the
* ERRORS_CONT and ERRORS_RO case, and perhaps return more
* aggressively from the ext4 function in question, with a
* more appropriate error code.
@@ -997,13 +1015,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, OLDALLOC))
seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
- if (test_opt(sb, XATTR_USER) &&
- !(def_mount_opts & EXT4_DEFM_XATTR_USER))
+ if (test_opt(sb, XATTR_USER))
seq_puts(seq, ",user_xattr");
- if (!test_opt(sb, XATTR_USER) &&
- (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
+ if (!test_opt(sb, XATTR_USER))
seq_puts(seq, ",nouser_xattr");
- }
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
@@ -1041,8 +1056,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
!(def_mount_opts & EXT4_DEFM_NODELALLOC))
seq_puts(seq, ",nodelalloc");
- if (test_opt(sb, MBLK_IO_SUBMIT))
- seq_puts(seq, ",mblk_io_submit");
+ if (!test_opt(sb, MBLK_IO_SUBMIT))
+ seq_puts(seq, ",nomblk_io_submit");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
@@ -1451,7 +1466,7 @@ static int parse_options(char *options, struct super_block *sb,
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
- args[0].to = args[0].from = 0;
+ args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1771,7 +1786,7 @@ set_qf_format:
return 0;
if (option < 0 || option > (1 << 30))
return 0;
- if (!is_power_of_2(option)) {
+ if (option && !is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
@@ -2120,6 +2135,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
return;
}
+ /* Skip cleanup if the feature set does not allow a r/w mount */
+ if (!ext4_feature_set_ok(sb, 0)) {
+ ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
+ "unknown ROCOMPAT features");
+ return;
+ }
+
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
@@ -2412,7 +2434,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
if (parse_strtoul(buf, 0x40000000, &t))
return -EINVAL;
- if (!is_power_of_2(t))
+ if (t && !is_power_of_2(t))
return -EINVAL;
sbi->s_inode_readahead_blks = t;
@@ -2970,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
mutex_unlock(&ext4_li_info->li_list_mtx);
sbi->s_li_request = elr;
+ /*
+ * set elr to NULL here since it has been inserted to
+ * the request_list and the removal and free of it is
+ * handled by ext4_clear_request_list from now on.
+ */
+ elr = NULL;
if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
ret = ext4_run_lazyinit_thread();
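The "elr = NULL" hunk above is an ownership hand-off: once the lazy-init request is on the shared list, the list owns it, so the function must forget its local pointer or a later error path would free it twice. A generic userspace sketch of the idiom (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct req { int id; };

static struct req *list_slot;  /* stand-in for the request list */

static int register_request(void)
{
    struct req *r = malloc(sizeof(*r));

    if (!r)
        return -1;
    r->id = 1;
    list_slot = r;  /* the list takes ownership here */
    r = NULL;       /* mirror of "elr = NULL" in the patch */

    /* ... later failures must not free the enqueued request ... */
    free(r);        /* freeing NULL is a safe no-op */
    return 0;
}

int main(void)
{
    register_request();
    printf("queued request id: %d\n", list_slot->id);
    free(list_slot);
    return 0;
}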
@@ -3095,14 +3123,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
+ /* xattr user namespace & acls are now defaulted on */
#ifdef CONFIG_EXT4_FS_XATTR
- if (def_mount_opts & EXT4_DEFM_XATTR_USER)
- set_opt(sb, XATTR_USER);
+ set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
- if (def_mount_opts & EXT4_DEFM_ACL)
- set_opt(sb, POSIX_ACL);
+ set_opt(sb, POSIX_ACL);
#endif
+ set_opt(sb, MBLK_IO_SUBMIT);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3380,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+ init_timer(&sbi->s_err_report);
+ sbi->s_err_report.function = print_daily_error_info;
+ sbi->s_err_report.data = (unsigned long) sb;
+
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
@@ -3516,7 +3548,7 @@ no_journal:
* concurrency isn't really necessary. Limit it to 1.
*/
EXT4_SB(sb)->dio_unwritten_wq =
- alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1);
+ alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
goto failed_mount_wq;
@@ -3531,17 +3563,16 @@ no_journal:
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
+ root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- iput(root);
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
- iput(root);
ret = -ENOMEM;
goto failed_mount4;
}
@@ -3642,9 +3673,6 @@ no_journal:
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
- init_timer(&sbi->s_err_report);
- sbi->s_err_report.function = print_daily_error_info;
- sbi->s_err_report.data = (unsigned long) sb;
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -3657,6 +3685,8 @@ cantfind_ext4:
goto failed_mount;
failed_mount4:
+ iput(root);
+ sb->s_root = NULL;
ext4_msg(sb, KERN_ERR, "mount failed");
destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
@@ -3666,6 +3696,7 @@ failed_mount_wq:
sbi->s_journal = NULL;
}
failed_mount3:
+ del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
vfree(sbi->s_flex_groups);
@@ -4132,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
/*
* LVM calls this function before a (read-only) snapshot is created. This
* gives us a chance to flush the journal completely and mark the fs clean.
+ *
+ * Note that this function alone cannot bring the filesystem into a clean
+ * state, because ext4 relies on @sb->s_frozen, which lives in an upper
+ * layer, to prevent new handles from being started. It thus needs help
+ * from the upper layer.
*/
static int ext4_freeze(struct super_block *sb)
{
@@ -4608,17 +4644,30 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
static int ext4_quota_off(struct super_block *sb, int type)
{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ handle_t *handle;
+
/* Force all delayed allocation blocks to be allocated.
* Caller already holds s_umount sem */
if (test_opt(sb, DELALLOC))
sync_filesystem(sb);
+ /* Update modification times of quota files when userspace can
+ * start looking at them */
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle))
+ goto out;
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+out:
return dquot_quota_off(sb, type);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
@@ -4708,9 +4757,8 @@ out:
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
+ ext4_mark_inode_dirty(handle, inode);
}
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- ext4_mark_inode_dirty(handle, inode);
mutex_unlock(&inode->i_mutex);
return len;
}
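A sketch of the new ext4_quota_off() bracket: a short journal handle wrapped around the quota-file time update, with a failed handle start falling through to the normal quota-off path. The failure toggle and names are stand-ins for the kernel calls:

#include <stdio.h>
#include <time.h>

struct inode { time_t mtime, ctime; };

static int journal_start_fails;

static int update_quota_times(struct inode *inode)
{
    if (journal_start_fails)
        goto out;  /* IS_ERR(handle) in the patch */
    inode->mtime = inode->ctime = time(NULL);
    /* ext4_mark_inode_dirty() + ext4_journal_stop() would go here */
out:
    return 0;      /* dquot_quota_off() runs either way */
}

int main(void)
{
    struct inode q = { 0, 0 };

    update_quota_times(&q);
    printf("mtime updated: %s", ctime(&q.mtime));
    journal_start_fails = 1;
    update_quota_times(&q); /* skips the update, still succeeds */
    return 0;
}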
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fc32176..b545ca1 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -735,7 +735,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
int offset = (char *)s->here - bs->bh->b_data;
unlock_buffer(bs->bh);
- jbd2_journal_release_buffer(handle, bs->bh);
+ ext4_handle_release_buffer(handle, bs->bh);
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
@@ -833,7 +833,7 @@ inserted:
new_bh = sb_getblk(sb, block);
if (!new_bh) {
getblk_failed:
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA);
error = -EIO;
goto cleanup;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0e277ec..8d68690 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
.readpages = fat_readpages,
.writepage = fat_writepage,
.writepages = fat_writepages,
- .sync_page = block_sync_page,
.write_begin = fat_write_begin,
.write_end = fat_write_end,
.direct_IO = fat_direct_IO,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6c82e5b..22764c7 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -159,7 +159,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/* required for strict SunOS emulation */
diff --git a/fs/fhandle.c b/fs/fhandle.c
index bf93ad2..6b08864 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -7,6 +7,7 @@
#include <linux/exportfs.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
+#include <linux/personality.h>
#include <asm/uaccess.h>
#include "internal.h"
diff --git a/fs/fifo.c b/fs/fifo.c
index 4e303c2..b1a524d 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -66,8 +66,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
/* suppress POLLHUP until we have
* seen a writer */
filp->f_version = pipe->w_counter;
- } else
- {
+ } else {
wait_for_partner(inode, &pipe->w_counter);
if(signal_pending(current))
goto err_rd;
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 751d6b2..0845f84 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs)
*tmp = fs->next;
fs->next = NULL;
write_unlock(&file_systems_lock);
+ synchronize_rcu();
return 0;
}
tmp = &(*tmp)->next;
}
write_unlock(&file_systems_lock);
- synchronize_rcu();
-
return -EINVAL;
}
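The unregister_filesystem() hunk moves synchronize_rcu() into the success branch: a grace period is only needed when an entry was actually unlinked, and the not-found path can return at once. A userspace sketch with a stubbed grace period (the real primitive waits for all RCU readers):

#include <stdio.h>

struct fs { const char *name; struct fs *next; };

static struct fs *file_systems;

static void synchronize_rcu(void) { /* wait for readers (stub) */ }

static int unregister_filesystem(struct fs *fs)
{
    struct fs **tmp = &file_systems;

    while (*tmp) {
        if (*tmp == fs) {
            *tmp = fs->next;
            fs->next = NULL;
            synchronize_rcu();  /* moved: only after unlink */
            return 0;
        }
        tmp = &(*tmp)->next;
    }
    return -22;  /* -EINVAL: nothing removed, no grace period */
}

int main(void)
{
    struct fs a = { "a", NULL };

    file_systems = &a;
    printf("%d\n", unregister_filesystem(&a));
    printf("%d\n", unregister_filesystem(&a));
    return 0;
}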
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index 78948b4..c9a6a94 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -164,7 +164,7 @@ vxfs_read_fshead(struct super_block *sbp)
goto out_free_pfp;
}
if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
- printk(KERN_ERR "vxfs: structual list inode is of wrong type (%x)\n",
+ printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK);
goto out_iput_stilist;
}
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 6c5131d..3360f1e 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -162,7 +162,7 @@ vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
/**
* vxfs_inode_by_name - find inode number for dentry
* @dip: directory to search in
- * @dp: dentry we seach for
+ * @dp: dentry we search for
*
* Description:
* vxfs_inode_by_name finds out the inode number of
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index d832429..b7b3af5 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -60,7 +60,7 @@ enum {
*
* The Object Location Table header is placed at the beginning of each
* OLT extent. It is used to find certain filesystem-wide metadata, e.g.
- * the inital inode list, the fileset header or the device configuration.
+ * the initial inode list, the fileset header or the device configuration.
*/
struct vxfs_olt {
u_int32_t olt_magic; /* magic number */
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 1429f3ae..5d318c4 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage,
.bmap = vxfs_bmap,
- .sync_page = block_sync_page,
};
inline void
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 59c6e49..34591ee8 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -144,7 +144,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
- * started when this function returns, we make no guarentees on
+ * started when this function returns, we make no guarantees on
* completion. Caller need not hold sb s_umount semaphore.
*
*/
@@ -176,6 +176,17 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
}
/*
+ * Remove the inode from the writeback list it is on.
+ */
+void inode_wb_list_del(struct inode *inode)
+{
+ spin_lock(&inode_wb_list_lock);
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&inode_wb_list_lock);
+}
+
+
+/*
* Redirty an inode: set its when-it-was dirtied timestamp and move it to the
* furthest end of its superblock's dirty-inode list.
*
@@ -188,6 +199,7 @@ static void redirty_tail(struct inode *inode)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ assert_spin_locked(&inode_wb_list_lock);
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
@@ -205,14 +217,17 @@ static void requeue_io(struct inode *inode)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ assert_spin_locked(&inode_wb_list_lock);
list_move(&inode->i_wb_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
/*
- * Prevent speculative execution through spin_unlock(&inode_lock);
+ * Prevent speculative execution through
+ * spin_unlock(&inode_wb_list_lock);
*/
+
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
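The recurring theme in this fs-writeback.c diff is a lock split: the old global inode_lock becomes a writeback-list lock plus a per-inode i_lock, always taken list lock first. A hedged pthreads sketch of the nesting order that writeback_single_inode() asserts (mutexes stand in for kernel spinlocks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_wb_list_lock = PTHREAD_MUTEX_INITIALIZER;

struct inode {
    pthread_mutex_t i_lock;
    int i_state;
};

static void writeback_single_inode(struct inode *inode)
{
    pthread_mutex_lock(&inode_wb_list_lock);
    pthread_mutex_lock(&inode->i_lock);

    inode->i_state |= 1;  /* e.g. set I_SYNC */

    /* drop both locks (innermost first) before doing the actual IO */
    pthread_mutex_unlock(&inode->i_lock);
    pthread_mutex_unlock(&inode_wb_list_lock);
}

int main(void)
{
    struct inode ino = { PTHREAD_MUTEX_INITIALIZER, 0 };

    writeback_single_inode(&ino);
    printf("i_state = %d\n", ino.i_state);
    return 0;
}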
@@ -286,6 +301,7 @@ static void move_expired_inodes(struct list_head *delaying_queue,
*/
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
+ assert_spin_locked(&inode_wb_list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
@@ -306,25 +322,25 @@ static void inode_wait_for_writeback(struct inode *inode)
wait_queue_head_t *wqh;
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- while (inode->i_state & I_SYNC) {
- spin_unlock(&inode_lock);
+ while (inode->i_state & I_SYNC) {
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
}
}
/*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
+ * Write out an inode's dirty pages. Called under inode_wb_list_lock and
+ * inode->i_lock. Either the caller has an active reference on the inode or
+ * the inode has I_WILL_FREE set.
*
* If `wait' is set, wait on the writeout.
*
* The whole writeout design is quite complex and fragile. We want to avoid
* starvation of particular inodes when others are being redirtied, prevent
* livelocks, etc.
- *
- * Called under inode_lock.
*/
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
@@ -333,6 +349,9 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
unsigned dirty;
int ret;
+ assert_spin_locked(&inode_wb_list_lock);
+ assert_spin_locked(&inode->i_lock);
+
if (!atomic_read(&inode->i_count))
WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
else
@@ -363,7 +382,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
/* Set I_SYNC, reset I_DIRTY_PAGES */
inode->i_state |= I_SYNC;
inode->i_state &= ~I_DIRTY_PAGES;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
ret = do_writepages(mapping, wbc);
@@ -383,10 +403,10 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* due to delalloc, clear dirty metadata flags right before
* write_inode()
*/
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
int err = write_inode(inode, wbc);
@@ -394,7 +414,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
ret = err;
}
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC;
if (!(inode->i_state & I_FREEING)) {
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
@@ -506,7 +527,9 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
* kind does not need periodic writeout yet, and for the latter
* kind writeout is handled by the freer.
*/
+ spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
requeue_io(inode);
continue;
}
@@ -515,10 +538,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
*/
- if (inode_dirtied_after(inode, wbc->wb_start))
+ if (inode_dirtied_after(inode, wbc->wb_start)) {
+ spin_unlock(&inode->i_lock);
return 1;
+ }
__iget(inode);
+
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
if (wbc->pages_skipped != pages_skipped) {
@@ -528,10 +554,11 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
*/
redirty_tail(inode);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
iput(inode);
cond_resched();
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (wbc->nr_to_write <= 0) {
wbc->more_io = 1;
return 1;
@@ -550,7 +577,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
if (!wbc->wb_start)
wbc->wb_start = jiffies; /* livelock avoidance */
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
@@ -568,7 +595,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
if (ret)
break;
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
/* Leave any unwritten inodes on b_io */
}
@@ -577,11 +604,11 @@ static void __writeback_inodes_sb(struct super_block *sb,
{
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
writeback_sb_inodes(sb, wb, wbc, true);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
}
/*
@@ -720,13 +747,15 @@ static long wb_writeback(struct bdi_writeback *wb,
* become available for writeback. Otherwise
* we'll just busyloop.
*/
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!list_empty(&wb->b_more_io)) {
inode = wb_inode(wb->b_more_io.prev);
trace_wbc_writeback_wait(&wbc, wb->bdi);
+ spin_lock(&inode->i_lock);
inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
}
return wrote;
@@ -992,7 +1021,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
struct backing_dev_info *bdi = NULL;
- bool wakeup_bdi = false;
/*
* Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -1016,7 +1044,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (unlikely(block_dump))
block_dump___mark_inode_dirty(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
@@ -1028,7 +1056,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* superblock list, based upon its state.
*/
if (inode->i_state & I_SYNC)
- goto out;
+ goto out_unlock_inode;
/*
* Only add valid (hashed) inodes to the superblock's
@@ -1036,16 +1064,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
if (!S_ISBLK(inode->i_mode)) {
if (inode_unhashed(inode))
- goto out;
+ goto out_unlock_inode;
}
if (inode->i_state & I_FREEING)
- goto out;
+ goto out_unlock_inode;
/*
* If the inode was already on b_dirty/b_io/b_more_io, don't
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
+ bool wakeup_bdi = false;
bdi = inode_to_bdi(inode);
if (bdi_cap_writeback_dirty(bdi)) {
@@ -1062,15 +1091,20 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wakeup_bdi = true;
}
+ spin_unlock(&inode->i_lock);
+ spin_lock(&inode_wb_list_lock);
inode->dirtied_when = jiffies;
list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+ spin_unlock(&inode_wb_list_lock);
+
+ if (wakeup_bdi)
+ bdi_wakeup_thread_delayed(bdi);
+ return;
}
}
-out:
- spin_unlock(&inode_lock);
+out_unlock_inode:
+ spin_unlock(&inode->i_lock);
- if (wakeup_bdi)
- bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
@@ -1101,7 +1135,7 @@ static void wait_sb_inodes(struct super_block *sb)
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
/*
* Data integrity sync. Must wait for all pages under writeback,
@@ -1111,22 +1145,25 @@ static void wait_sb_inodes(struct super_block *sb)
* we still have to wait for that writeout.
*/
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- struct address_space *mapping;
+ struct address_space *mapping = inode->i_mapping;
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
- continue;
- mapping = inode->i_mapping;
- if (mapping->nrpages == 0)
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ (mapping->nrpages == 0)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
+
/*
- * We hold a reference to 'inode' so it couldn't have
- * been removed from s_inodes list while we dropped the
- * inode_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it
- * under inode_lock. So we keep the reference and iput
- * it later.
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
*/
iput(old_inode);
old_inode = inode;
@@ -1135,9 +1172,9 @@ static void wait_sb_inodes(struct super_block *sb)
cond_resched();
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
}
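The wait_sb_inodes() loop keeps one reference alive across the lock drop and releases the previous inode on each iteration, so iput() never runs under inode_sb_list_lock. A userspace sketch of the deferred-iput pattern with toy refcounts:

#include <stdio.h>

struct inode { int count; const char *name; };

static void iget(struct inode *i) { i->count++; }
static void iput(struct inode *i) { if (i) i->count--; }

int main(void)
{
    struct inode inodes[3] = { {1, "a"}, {1, "b"}, {1, "c"} };
    struct inode *old_inode = NULL;

    for (int n = 0; n < 3; n++) {
        struct inode *inode = &inodes[n];

        /* list lock held here in the kernel */
        iget(inode);
        /* locks dropped: safe to release the previous reference */
        iput(old_inode);
        old_inode = inode;
        printf("visiting %s\n", inode->name);
        /* lock re-taken before the next list step */
    }
    iput(old_inode);
    return 0;
}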
@@ -1271,9 +1308,11 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
ret = writeback_single_inode(inode, &wbc);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
if (sync)
inode_sync_wait(inode);
return ret;
@@ -1295,9 +1334,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
ret = writeback_single_inode(inode, wbc);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
return ret;
}
EXPORT_SYMBOL(sync_inode);
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 7c39b885..b6cca47 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -305,7 +305,7 @@ static void cuse_gendev_release(struct device *dev)
static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
struct cuse_conn *cc = fc_to_cc(fc);
- struct cuse_init_out *arg = &req->misc.cuse_init_out;
+ struct cuse_init_out *arg = req->out.args[0].value;
struct page *page = req->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
@@ -384,6 +384,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
+ kfree(arg);
__free_page(page);
return;
@@ -405,6 +406,7 @@ static int cuse_send_init(struct cuse_conn *cc)
struct page *page;
struct fuse_conn *fc = &cc->fc;
struct cuse_init_in *arg;
+ void *outarg;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
@@ -419,6 +421,10 @@ static int cuse_send_init(struct cuse_conn *cc)
if (!page)
goto err_put_req;
+ outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
+ if (!outarg)
+ goto err_free_page;
+
arg = &req->misc.cuse_init_in;
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
@@ -429,7 +435,7 @@ static int cuse_send_init(struct cuse_conn *cc)
req->in.args[0].value = arg;
req->out.numargs = 2;
req->out.args[0].size = sizeof(struct cuse_init_out);
- req->out.args[0].value = &req->misc.cuse_init_out;
+ req->out.args[0].value = outarg;
req->out.args[1].size = CUSE_INIT_INFO_MAX;
req->out.argvar = 1;
req->out.argpages = 1;
@@ -440,6 +446,8 @@ static int cuse_send_init(struct cuse_conn *cc)
return 0;
+err_free_page:
+ __free_page(page);
err_put_req:
fuse_put_request(fc, req);
err:
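The cuse.c hunks stop embedding cuse_init_out in the request: the reply buffer is kzalloc'd separately, wired up as the out argument, and kfree'd by the completion callback. A minimal sketch of that lifetime hand-off, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct init_out { int major, minor; };

struct request { void *out_value; void (*end)(struct request *); };

static void process_init_reply(struct request *req)
{
    struct init_out *arg = req->out_value;

    printf("server speaks %d.%d\n", arg->major, arg->minor);
    free(arg);  /* kfree(arg) at the patch's out: label */
}

int main(void)
{
    struct request req;
    struct init_out *outarg = calloc(1, sizeof(*outarg));

    if (!outarg)
        return 1;
    req.out_value = outarg; /* req->out.args[0].value = outarg */
    req.end = process_init_reply;

    outarg->major = 7;      /* pretend the reply arrived */
    outarg->minor = 11;
    req.end(&req);
    return 0;
}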
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cf8d28d..640fc22 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -737,14 +737,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (WARN_ON(PageMlocked(oldpage)))
goto out_fallback_unlock;
- remove_from_page_cache(oldpage);
- page_cache_release(oldpage);
-
- err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+ err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
if (err) {
- printk(KERN_WARNING "fuse_try_move_page: failed to add page");
- goto out_fallback_unlock;
+ unlock_page(newpage);
+ return err;
}
+
page_cache_get(newpage);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
@@ -1910,6 +1908,21 @@ __acquires(fc->lock)
kfree(dequeue_forget(fc, 1, NULL));
}
+static void end_polls(struct fuse_conn *fc)
+{
+ struct rb_node *p;
+
+ p = rb_first(&fc->polled_files);
+
+ while (p) {
+ struct fuse_file *ff;
+ ff = rb_entry(p, struct fuse_file, polled_node);
+ wake_up_interruptible_all(&ff->poll_wait);
+
+ p = rb_next(p);
+ }
+}
+
/*
* Abort all requests.
*
@@ -1937,6 +1950,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
fc->blocked = 0;
end_io_requests(fc);
end_queued_requests(fc);
+ end_polls(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1953,6 +1967,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
fc->connected = 0;
fc->blocked = 0;
end_queued_requests(fc);
+ end_polls(fc);
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
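end_polls() above ensures that tearing down a fuse connection wakes every file sleeping in poll, so callers re-evaluate instead of blocking forever. A sketch of the idea with the rbtree walk flattened to an array (names are illustrative):

#include <stdio.h>

struct fuse_file { const char *name; int woken; };

static void wake_up_interruptible_all(struct fuse_file *ff)
{
    ff->woken = 1;
}

static void end_polls(struct fuse_file *polled, int n)
{
    for (int i = 0; i < n; i++)
        wake_up_interruptible_all(&polled[i]);
}

int main(void)
{
    struct fuse_file files[2] = { {"a", 0}, {"b", 0} };

    end_polls(files, 2);
    printf("%s woken=%d, %s woken=%d\n",
           files[0].name, files[0].woken,
           files[1].name, files[1].woken);
    return 0;
}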
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8bd0ef9..c6ba49b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,10 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
struct inode *inode;
- if (nd && nd->flags & LOOKUP_RCU)
- return -ECHILD;
-
- inode = entry->d_inode;
+ inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
return 0;
else if (fuse_dentry_time(entry) < get_jiffies_64()) {
@@ -177,6 +174,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
if (!inode)
return 0;
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
fc = get_fuse_conn(inode);
req = fuse_get_req(fc);
if (IS_ERR(req))
@@ -970,6 +970,14 @@ static int fuse_access(struct inode *inode, int mask)
return err;
}
+static int fuse_perm_getattr(struct inode *inode, int flags)
+{
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
+ return fuse_do_getattr(inode, NULL, NULL);
+}
+
/*
* Check permission. The two basic access models of FUSE are:
*
@@ -989,9 +997,6 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
bool refreshed = false;
int err = 0;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
if (!fuse_allow_task(fc, current))
return -EACCES;
@@ -1000,9 +1005,15 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
*/
if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
- err = fuse_update_attributes(inode, NULL, NULL, &refreshed);
- if (err)
- return err;
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ if (fi->i_time < get_jiffies_64()) {
+ refreshed = true;
+
+ err = fuse_perm_getattr(inode, flags);
+ if (err)
+ return err;
+ }
}
if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
@@ -1012,7 +1023,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
attributes. This is also needed, because the root
node will at first have no permissions */
if (err == -EACCES && !refreshed) {
- err = fuse_do_getattr(inode, NULL, NULL);
+ err = fuse_perm_getattr(inode, flags);
if (!err)
err = generic_permission(inode, mask,
flags, NULL);
@@ -1023,13 +1034,16 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
noticed immediately, only after the attribute
timeout has expired */
} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
err = fuse_access(inode, mask);
} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
if (!(inode->i_mode & S_IXUGO)) {
if (refreshed)
return -EACCES;
- err = fuse_do_getattr(inode, NULL, NULL);
+ err = fuse_perm_getattr(inode, flags);
if (!err && !(inode->i_mode & S_IXUGO))
return -EACCES;
}
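The fuse_permission() hunks refine RCU-walk support: instead of failing every RCU lookup with -ECHILD up front, the code only bails out when it actually has to sleep (an attribute refresh or a FUSE_ACCESS round-trip). A compressed sketch of that control flow; the flag, errno, and staleness toggle are stand-ins:

#include <stdio.h>

enum { ECHILD = 10, IPERM_FLAG_RCU = 1 };

static int attrs_stale;  /* stand-in for fi->i_time expiry */

static int perm_getattr(int flags)
{
    if (flags & IPERM_FLAG_RCU)
        return -ECHILD;  /* must drop out of RCU walk to sleep */
    attrs_stale = 0;     /* blocking refresh happened */
    return 0;
}

static int fuse_permission(int flags)
{
    if (attrs_stale)
        return perm_getattr(flags);
    return 0;  /* cached attributes are fresh: RCU-safe */
}

int main(void)
{
    attrs_stale = 1;
    printf("rcu walk, stale attrs: %d\n", fuse_permission(IPERM_FLAG_RCU));
    printf("ref walk, stale attrs: %d\n", fuse_permission(0));
    printf("rcu walk, fresh attrs: %d\n", fuse_permission(IPERM_FLAG_RCU));
    return 0;
}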
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9e0832d..82a6646 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -222,7 +222,7 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
- wake_up_interruptible_sync(&ff->poll_wait);
+ wake_up_interruptible_all(&ff->poll_wait);
inarg->fh = ff->fh;
inarg->flags = flags;
@@ -523,7 +523,7 @@ static int fuse_readpage(struct file *file, struct page *page)
goto out;
/*
- * Page writeback can extend beyond the liftime of the
+ * Page writeback can extend beyond the lifetime of the
* page-cache page, so make sure we read a properly synced
* page.
*/
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d428694..b788bec 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -272,7 +272,6 @@ struct fuse_req {
struct fuse_init_in init_in;
struct fuse_init_out init_out;
struct cuse_init_in cuse_init_in;
- struct cuse_init_out cuse_init_out;
struct {
struct fuse_read_in in;
u64 attr_ver;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 051b1a0..cc6ec4b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -870,7 +870,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 06c48a8..8f26d1a 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -74,7 +74,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
acl = posix_acl_from_xattr(value, size);
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index 21f7e46..f3d23ef 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS := -I$(src)
+ccflags-y := -I$(src)
obj-$(CONFIG_GFS2_FS) += gfs2.o
gfs2-y := acl.o bmap.o dir.o xattr.o glock.o \
glops.o inode.o log.o lops.o main.o meta_io.o \
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aad77e4..0f5c4f9 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
}
brelse(dibh);
- gfs2_trans_end(sdp);
failed:
+ gfs2_trans_end(sdp);
if (al) {
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
@@ -1117,7 +1117,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writeback_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.bmap = gfs2_bmap,
@@ -1133,7 +1132,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepage = gfs2_ordered_writepage,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
@@ -1151,7 +1149,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef3dc4b..74add2d 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1136,7 +1136,7 @@ void gfs2_trim_blocks(struct inode *inode)
* earlier versions of GFS2 have a bug in the stuffed file reading
* code which will result in a buffer overrun if the size is larger
* than the max stuffed file size. In order to prevent this from
- * occuring, such files are unstuffed, but in other cases we can
+ * occurring, such files are unstuffed, but in other cases we can
* just update the inode size directly.
*
* Returns: 0 on success, or -ve on error
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 5c356d0..f789c57 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino));
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4074b95..e483108 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -221,7 +221,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
goto out_drop_write;
error = -EACCES;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
goto out;
error = 0;
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
-static void empty_write_end(struct page *page, unsigned from,
- unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+ unsigned to, int mode)
{
- struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+ struct inode *inode = page->mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *bh;
+ unsigned offset, blksize = 1 << inode->i_blkbits;
+ pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
zero_user(page, from, to-from);
mark_page_accessed(page);
- if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
+ if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+ if (!gfs2_is_writeback(ip))
+ gfs2_page_add_databufs(ip, page, from, to);
+
+ block_commit_write(page, from, to);
+ return 0;
+ }
+
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ clear_buffer_new(bh);
+ write_dirty_buffer(bh, WRITE);
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
- block_commit_write(page, from, to);
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ return -EIO;
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
+ return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
return !buffer_mapped(&bh_map);
}
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+ int mode)
{
struct inode *inode = page->mapping->host;
unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
end = 0;
}
start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
ret = __block_write_begin(page, start, end - start, gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
}
return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
if (curr == end)
to = end_offset;
- error = write_empty_blocks(page, from, to);
+ error = write_empty_blocks(page, from, to, mode);
if (!error && offset + to > inode->i_size &&
!(mode & FALLOC_FL_KEEP_SIZE)) {
i_size_write(inode, offset + to);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e243131..7a4fb63 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -93,14 +93,12 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
static inline void spin_lock_bucket(unsigned int hash)
{
- struct hlist_bl_head *bl = &gl_hash_table[hash];
- bit_spin_lock(0, (unsigned long *)bl);
+ hlist_bl_lock(&gl_hash_table[hash]);
}
static inline void spin_unlock_bucket(unsigned int hash)
{
- struct hlist_bl_head *bl = &gl_hash_table[hash];
- __bit_spin_unlock(0, (unsigned long *)bl);
+ hlist_bl_unlock(&gl_hash_table[hash]);
}
static void gfs2_glock_dealloc(struct rcu_head *rcu)
@@ -1123,7 +1121,7 @@ void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
* @number: the lock number
* @glops: the glock operations for the type of glock
* @state: the state to acquire the glock in
- * @flags: modifier flags for the aquisition
+ * @flags: modifier flags for the acquisition
* @gh: the struct gfs2_holder
*
* Returns: errno
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 3754e3c..25eeb2b 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
static void iopen_go_callback(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+
+ if (sdp->sd_vfs->s_flags & MS_RDONLY)
+ return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 97d54a2..9134dcb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
u64 ir_length;
};
+struct gfs2_skip_data {
+ u64 no_addr;
+ int skipped;
+ int non_block;
+};
+
static int iget_test(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == *no_addr)
+ if (ip->i_no_addr == data->no_addr) {
+ if (data->non_block &&
+ inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+ data->skipped = 1;
+ return 0;
+ }
return 1;
-
+ }
return 0;
}
static int iget_set(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- inode->i_ino = (unsigned long)*no_addr;
- ip->i_no_addr = *no_addr;
+ if (data->skipped)
+ return -ENOENT;
+ inode->i_ino = (unsigned long)(data->no_addr);
+ ip->i_no_addr = data->no_addr;
return 0;
}
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
unsigned long hash = (unsigned long)no_addr;
- return ilookup5(sb, hash, iget_test, &no_addr);
+ struct gfs2_skip_data data;
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = 0;
+ return ilookup5(sb, hash, iget_test, &data);
}
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
+ int non_block)
{
+ struct gfs2_skip_data data;
unsigned long hash = (unsigned long)no_addr;
- return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = non_block;
+ return iget5_locked(sb, hash, iget_test, iget_set, &data);
}
/**
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
+ * @non_block: Can we block on inodes that are being freed?
*
* Returns: A VFS inode, or an error
*/
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
- u64 no_addr, u64 no_formal_ino)
+ u64 no_addr, u64 no_formal_ino, int non_block)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl = NULL;
int error;
- inode = gfs2_iget(sb, no_addr);
+ inode = gfs2_iget(sb, no_addr, non_block);
ip = GFS2_I(inode);
if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_holder i_gh;
- struct inode *inode;
+ struct inode *inode = NULL;
int error;
+ /* Must not read in the block until its type has been verified */
error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
- LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
if (error)
return ERR_PTR(error);
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
if (error)
goto fail;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
if (IS_ERR(inode))
goto fail;
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino);
+ inum.no_formal_ino, 0);
if (IS_ERR(inode))
goto fail_gunlock2;
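The gfs2_skip_data mechanism above passes a small struct into the iget5 callbacks instead of a bare block number, so callers can ask for inodes that are mid-free to be skipped rather than waited on. A standalone sketch of the test callback, with stand-in types:

#include <stdio.h>

struct skip_data { unsigned long long no_addr; int skipped; int non_block; };

struct inode { unsigned long long no_addr; int freeing; };

static int iget_test(struct inode *inode, void *opaque)
{
    struct skip_data *data = opaque;

    if (inode->no_addr != data->no_addr)
        return 0;
    if (data->non_block && inode->freeing) {
        data->skipped = 1;  /* caller sees why the match failed */
        return 0;
    }
    return 1;
}

int main(void)
{
    struct inode ino = { 42, 1 };       /* being freed */
    struct skip_data d = { 42, 0, 1 };  /* non-blocking lookup */

    printf("match=%d skipped=%d\n", iget_test(&ino, &d), d.skipped);
    return 0;
}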
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 3e00a66..099ca30 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -97,7 +97,8 @@ err:
}
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ int non_block);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 *no_formal_ino,
unsigned int blktype);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e7ed31f..5b102c1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -121,7 +121,7 @@ __acquires(&sdp->sd_ail_lock)
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
lock_buffer(bh);
if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index e919abf..51d27f0 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -204,7 +204,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_log_unlock(sdp);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
n = 0;
@@ -214,7 +214,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
gfs2_log_unlock(sdp);
lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
if (++n >= num)
break;
@@ -356,7 +356,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data;
@@ -373,7 +373,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -575,7 +575,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
ptr = bh_log_ptr(bh);
get_bh(bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
while(!list_empty(list)) {
bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -601,7 +601,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
} else {
bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
}
- submit_bh(WRITE_SYNC_PLUG, bh1);
+ submit_bh(WRITE_SYNC, bh1);
gfs2_log_lock(sdp);
ptr += 2;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 01d97f4..675349b 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
struct buffer_head *bh, *head;
int nr_underway = 0;
int write_op = REQ_META |
- (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
- .sync_page = block_sync_page,
};
/**
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 42ef243..d3c69eb 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cf930cd..6fcae84 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
- goal++;
+ goal = max(block + 1, goal + 1);
if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
found++;
/* Limit reclaim to sensible number of tasks */
- if (found > 2*NR_CPUS)
+ if (found > NR_CPUS)
return;
}
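The try_rgrp_unlink() hunk keeps the search goal marching strictly forward even when the bitmap search returns a block below the previous goal, so the loop cannot spin on the same block. A tiny sketch of the corrected advance with made-up search results:

#include <stdio.h>

static unsigned long long max64(unsigned long long a, unsigned long long b)
{
    return a > b ? a : b;
}

int main(void)
{
    unsigned long long goal = 100;
    unsigned long long blocks[] = { 40, 120, 90 };  /* search results */

    for (int i = 0; i < 3; i++) {
        unsigned long long block = blocks[i];

        goal = max64(block + 1, goal + 1);  /* was: goal++ */
        printf("found %llu, next goal %llu\n", block, goal);
    }
    return 0;
}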
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ec73ed7..b9f28e6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -657,7 +657,7 @@ out:
* @sdp: the file system
*
* This function flushes data and meta data for all machines by
- * aquiring the transaction log exclusively. All journals are
+ * acquiring the transaction log exclusively. All journals are
* ensured to be in a clean state as well.
*
* Returns: errno
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
static void gfs2_evict_inode(struct inode *inode)
{
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+ struct super_block *sb = inode->i_sb;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
- if (inode->i_nlink)
+ if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
goto out;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ /* Must not read inode block until block type has been verified */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
if (error)
goto out_truncate;
+ if (test_bit(GIF_INVALID, &ip->i_flags)) {
+ error = gfs2_inode_refresh(ip);
+ if (error)
+ goto out_truncate;
+ }
+
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index dffb4e9..fff16c9 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
const struct address_space_operations hfs_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index a8df651..b248a6cf 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
const struct address_space_operations hfsplus_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 508ce66..fbaa669 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -47,7 +47,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
if (err)
goto out;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto out_drop_write;
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 2dbae20..9b9eb69 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -119,7 +119,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations hpfs_aops = {
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hpfs_write_begin,
.write_end = generic_write_end,
.bmap = _hpfs_bmap
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9885082..b9eeb1c 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,8 +332,7 @@ static void truncate_huge_page(struct page *page)
{
cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
ClearPageUptodate(page);
- remove_from_page_cache(page);
- put_page(page);
+ delete_from_page_cache(page);
}
static void truncate_hugepages(struct inode *inode, loff_t lstart)
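The hunk above works because delete_from_page_cache() both unhooks the page from its mapping and drops the page cache's reference, so the explicit put_page() goes away. A sketch of the resulting call, with the old two-step sequence noted for comparison (illustrative only):

#include <linux/mm.h>
#include <linux/pagemap.h>

static void drop_cached_page(struct page *page)
{
	/* before this series: remove_from_page_cache(page); put_page(page); */
	delete_from_page_cache(page);	/* unhooks and drops the cache ref */
}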
diff --git a/fs/inode.c b/fs/inode.c
index 9910c03..33c963d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -25,6 +25,39 @@
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/ima.h>
+#include <linux/cred.h>
+#include "internal.h"
+
+/*
+ * inode locking rules.
+ *
+ * inode->i_lock protects:
+ * inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ * inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ * sb->s_inodes, inode->i_sb_list
+ * inode_wb_list_lock protects:
+ * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ * inode_hashtable, inode->i_hash
+ *
+ * Lock ordering:
+ *
+ * inode_sb_list_lock
+ * inode->i_lock
+ * inode_lru_lock
+ *
+ * inode_wb_list_lock
+ * inode->i_lock
+ *
+ * inode_hash_lock
+ * inode_sb_list_lock
+ * inode->i_lock
+ *
+ * iunique_lock
+ * inode_hash_lock
+ */
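Under this ordering, any path that needs one of the list locks together with inode->i_lock must take the list lock first. A minimal sketch of a conforming path, assuming the extern declaration of inode_sb_list_lock from fs/internal.h (the helper itself is invented for illustration):

static void example_tag_inode(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);	/* outer lock, per the table above */
	spin_lock(&inode->i_lock);	/* inner lock */
	inode->i_state |= I_REFERENCED;
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_sb_list_lock);
}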
/*
* This is needed for the following functions:
@@ -59,6 +92,8 @@
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
/*
* Each inode can be on two separate lists. One is
@@ -73,15 +108,10 @@ static unsigned int i_hash_shift __read_mostly;
*/
static LIST_HEAD(inode_lru);
-static struct hlist_head *inode_hashtable __read_mostly;
+static DEFINE_SPINLOCK(inode_lru_lock);
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
/*
* iprune_sem provides exclusion between the icache shrinking and the
@@ -95,6 +125,14 @@ DEFINE_SPINLOCK(inode_lock);
static DECLARE_RWSEM(iprune_sem);
/*
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
+ */
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
+
+/*
* Statistics gathering..
*/
struct inodes_stat_t inodes_stat;
@@ -136,15 +174,6 @@ int proc_nr_inodes(ctl_table *table, int write,
}
#endif
-static void wake_up_inode(struct inode *inode)
-{
- /*
- * Prevent speculative execution through spin_unlock(&inode_lock);
- */
- smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
-}
-
/**
* inode_init_always - perform inode structure initialisation
* @sb: superblock inode belongs to
@@ -155,7 +184,6 @@ static void wake_up_inode(struct inode *inode)
*/
int inode_init_always(struct super_block *sb, struct inode *inode)
{
- static const struct address_space_operations empty_aops;
static const struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct address_space *const mapping = &inode->i_data;
@@ -335,7 +363,7 @@ static void init_once(void *foo)
}
/*
- * inode_lock must be held
+ * inode->i_lock must be held
*/
void __iget(struct inode *inode)
{
@@ -353,23 +381,22 @@ EXPORT_SYMBOL(ihold);
static void inode_lru_list_add(struct inode *inode)
{
+ spin_lock(&inode_lru_lock);
if (list_empty(&inode->i_lru)) {
list_add(&inode->i_lru, &inode_lru);
inodes_stat.nr_unused++;
}
+ spin_unlock(&inode_lru_lock);
}
static void inode_lru_list_del(struct inode *inode)
{
+ spin_lock(&inode_lru_lock);
if (!list_empty(&inode->i_lru)) {
list_del_init(&inode->i_lru);
inodes_stat.nr_unused--;
}
-}
-
-static inline void __inode_sb_list_add(struct inode *inode)
-{
- list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+ spin_unlock(&inode_lru_lock);
}
/**
@@ -378,15 +405,17 @@ static inline void __inode_sb_list_add(struct inode *inode)
*/
void inode_sb_list_add(struct inode *inode)
{
- spin_lock(&inode_lock);
- __inode_sb_list_add(inode);
- spin_unlock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
+ list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+ spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
-static inline void __inode_sb_list_del(struct inode *inode)
+static inline void inode_sb_list_del(struct inode *inode)
{
+ spin_lock(&inode_sb_list_lock);
list_del_init(&inode->i_sb_list);
+ spin_unlock(&inode_sb_list_lock);
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -411,24 +440,15 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
+ spin_lock(&inode->i_lock);
hlist_add_head(&inode->i_hash, b);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
- * __remove_inode_hash - remove an inode from the hash
- * @inode: inode to unhash
- *
- * Remove an inode from the superblock.
- */
-static void __remove_inode_hash(struct inode *inode)
-{
- hlist_del_init(&inode->i_hash);
-}
-
-/**
* remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
@@ -436,9 +456,11 @@ static void __remove_inode_hash(struct inode *inode)
*/
void remove_inode_hash(struct inode *inode)
{
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
+ spin_lock(&inode->i_lock);
hlist_del_init(&inode->i_hash);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);
@@ -455,10 +477,29 @@ void end_writeback(struct inode *inode)
}
EXPORT_SYMBOL(end_writeback);
+/*
+ * Free the inode passed in, removing it from the lists it is still connected
+ * to. We remove any pages still attached to the inode and wait for any IO that
+ * is still in progress before finally destroying the inode.
+ *
+ * An inode must already be marked I_FREEING so that we avoid the inode being
+ * moved back onto lists if we race with other code that manipulates the lists
+ * (e.g. writeback_single_inode). The caller is responsible for setting this.
+ *
+ * An inode must already be removed from the LRU list before being evicted from
+ * the cache. This should occur atomically with setting the I_FREEING state
+ * flag, so no inodes here should ever be on the LRU when being evicted.
+ */
static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
+ BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(!list_empty(&inode->i_lru));
+
+ inode_wb_list_del(inode);
+ inode_sb_list_del(inode);
+
if (op->evict_inode) {
op->evict_inode(inode);
} else {
@@ -470,6 +511,15 @@ static void evict(struct inode *inode)
bd_forget(inode);
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
+
+ remove_inode_hash(inode);
+
+ spin_lock(&inode->i_lock);
+ wake_up_bit(&inode->i_state, __I_NEW);
+ BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
+ spin_unlock(&inode->i_lock);
+
+ destroy_inode(inode);
}
/*
@@ -488,14 +538,6 @@ static void dispose_list(struct list_head *head)
list_del_init(&inode->i_lru);
evict(inode);
-
- spin_lock(&inode_lock);
- __remove_inode_hash(inode);
- __inode_sb_list_del(inode);
- spin_unlock(&inode_lock);
-
- wake_up_inode(inode);
- destroy_inode(inode);
}
}
@@ -513,25 +555,23 @@ void evict_inodes(struct super_block *sb)
struct inode *inode, *next;
LIST_HEAD(dispose);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (atomic_read(&inode->i_count))
continue;
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
- list_move(&inode->i_lru, &dispose);
- list_del_init(&inode->i_wb_list);
- if (!(inode->i_state & (I_DIRTY | I_SYNC)))
- inodes_stat.nr_unused--;
+ inode_lru_list_del(inode);
+ spin_unlock(&inode->i_lock);
+ list_add(&inode->i_lru, &dispose);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose);
@@ -560,31 +600,30 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
struct inode *inode, *next;
LIST_HEAD(dispose);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & I_DIRTY && !kill_dirty) {
+ spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
if (atomic_read(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
- list_move(&inode->i_lru, &dispose);
- list_del_init(&inode->i_wb_list);
- if (!(inode->i_state & (I_DIRTY | I_SYNC)))
- inodes_stat.nr_unused--;
+ inode_lru_list_del(inode);
+ spin_unlock(&inode->i_lock);
+ list_add(&inode->i_lru, &dispose);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose);
@@ -606,7 +645,7 @@ static int can_unuse(struct inode *inode)
/*
* Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
*
* Any inodes which are pinned purely because of attached pagecache have their
* pagecache removed. If the inode has metadata buffers attached to
@@ -627,7 +666,7 @@ static void prune_icache(int nr_to_scan)
unsigned long reap = 0;
down_read(&iprune_sem);
- spin_lock(&inode_lock);
+ spin_lock(&inode_lru_lock);
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
struct inode *inode;
@@ -637,53 +676,67 @@ static void prune_icache(int nr_to_scan)
inode = list_entry(inode_lru.prev, struct inode, i_lru);
/*
+ * we are inverting the inode_lru_lock/inode->i_lock here,
+ * so use a trylock. If we fail to get the lock, just move the
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+ list_move(&inode->i_lru, &inode_lru);
+ continue;
+ }
+
+ /*
* Referenced or dirty inodes are still in use. Give them
* another pass through the LRU as we cannot reclaim them now.
*/
if (atomic_read(&inode->i_count) ||
(inode->i_state & ~I_REFERENCED)) {
list_del_init(&inode->i_lru);
+ spin_unlock(&inode->i_lock);
inodes_stat.nr_unused--;
continue;
}
/* recently referenced inodes get one more pass */
if (inode->i_state & I_REFERENCED) {
- list_move(&inode->i_lru, &inode_lru);
inode->i_state &= ~I_REFERENCED;
+ list_move(&inode->i_lru, &inode_lru);
+ spin_unlock(&inode->i_lock);
continue;
}
if (inode_has_buffers(inode) || inode->i_data.nrpages) {
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_lru_lock);
if (remove_inode_buffers(inode))
reap += invalidate_mapping_pages(&inode->i_data,
0, -1);
iput(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode_lru_lock);
if (inode != list_entry(inode_lru.next,
struct inode, i_lru))
continue; /* wrong inode or list_empty */
- if (!can_unuse(inode))
+ /* avoid lock inversions with trylock */
+ if (!spin_trylock(&inode->i_lock))
+ continue;
+ if (!can_unuse(inode)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
+ spin_unlock(&inode->i_lock);
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
list_move(&inode->i_lru, &freeable);
- list_del_init(&inode->i_wb_list);
inodes_stat.nr_unused--;
}
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
__count_vm_events(PGINODESTEAL, reap);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_lru_lock);
dispose_list(&freeable);
up_read(&iprune_sem);
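prune_icache() walks the LRU while holding inode_lru_lock and only then wants inode->i_lock, which is the reverse of the documented inode->i_lock then inode_lru_lock order; that is why it uses spin_trylock() and defers contended inodes instead of blocking. A generic sketch of that pattern (types and names invented; bounded by a scan count, just as prune_icache() is bounded by nr_to_scan):

#include <linux/list.h>
#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	struct list_head lru;
};

static void scan_lru(struct list_head *lru, spinlock_t *lru_lock, int nr)
{
	spin_lock(lru_lock);
	while (nr-- > 0 && !list_empty(lru)) {
		struct obj *o = list_entry(lru->prev, struct obj, lru);

		/* taking o->lock here inverts the documented order,
		 * so try it and revisit the object later on failure */
		if (!spin_trylock(&o->lock)) {
			list_move(&o->lru, lru);
			continue;
		}
		list_del_init(&o->lru);
		spin_unlock(&o->lock);
	}
	spin_unlock(lru_lock);
}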
@@ -732,15 +785,21 @@ static struct inode *find_inode(struct super_block *sb,
repeat:
hlist_for_each_entry(inode, node, head, i_hash) {
- if (inode->i_sb != sb)
+ spin_lock(&inode->i_lock);
+ if (inode->i_sb != sb) {
+ spin_unlock(&inode->i_lock);
continue;
- if (!test(inode, data))
+ }
+ if (!test(inode, data)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
__iget(inode);
+ spin_unlock(&inode->i_lock);
return inode;
}
return NULL;
@@ -758,15 +817,21 @@ static struct inode *find_inode_fast(struct super_block *sb,
repeat:
hlist_for_each_entry(inode, node, head, i_hash) {
- if (inode->i_ino != ino)
+ spin_lock(&inode->i_lock);
+ if (inode->i_ino != ino) {
+ spin_unlock(&inode->i_lock);
continue;
- if (inode->i_sb != sb)
+ }
+ if (inode->i_sb != sb) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
__iget(inode);
+ spin_unlock(&inode->i_lock);
return inode;
}
return NULL;
@@ -826,19 +891,26 @@ struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
- spin_lock_prefetch(&inode_lock);
+ spin_lock_prefetch(&inode_sb_list_lock);
inode = alloc_inode(sb);
if (inode) {
- spin_lock(&inode_lock);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = 0;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
}
return inode;
}
EXPORT_SYMBOL(new_inode);
+/**
+ * unlock_new_inode - clear the I_NEW state and wake up any waiters
+ * @inode: new inode to unlock
+ *
+ * Called when the inode is fully initialised to clear the new state of the
+ * inode and wake up anyone waiting for the inode to finish initialisation.
+ */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -858,51 +930,67 @@ void unlock_new_inode(struct inode *inode)
}
}
#endif
- /*
- * This is special! We do not need the spinlock when clearing I_NEW,
- * because we're guaranteed that nobody else tries to do anything about
- * the state of the inode when it is locked, as we just created it (so
- * there can be no old holders that haven't tested I_NEW).
- * However we must emit the memory barrier so that other CPUs reliably
- * see the clearing of I_NEW after the other inode initialisation has
- * completed.
- */
- smp_mb();
+ spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
- wake_up_inode(inode);
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
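The waiter side of this handshake is wait_on_inode(), built on the same bit waitqueue: a wait_on_bit() on __I_NEW sleeps until the wake_up_bit() above fires. A hedged sketch of an equivalent open-coded waiter (illustrative; the kernel's real action callback is the exported inode_wait()):

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int example_inode_wait(void *word)
{
	schedule();	/* sleep; woken via wake_up_bit() on __I_NEW */
	return 0;
}

static void wait_for_new_inode(struct inode *inode)
{
	wait_on_bit(&inode->i_state, __I_NEW, example_inode_wait,
		    TASK_UNINTERRUPTIBLE);
}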
-/*
- * This is called without the inode lock held.. Be careful.
+/**
+ * iget5_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @hashval: hash value (usually inode number) to get
+ * @test: callback used for comparisons between inodes
+ * @set: callback used to initialize a new struct inode
+ * @data: opaque data pointer to pass to @test and @set
*
- * We no longer cache the sb_flags in i_flags - see fs.h
- * -- rmk@arm.uk.linux.org
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if present return it with an increased reference count. This is
+ * a generalized version of iget_locked() for file systems where the inode
+ * number is not sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
+ *
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
*/
-static struct inode *get_new_inode(struct super_block *sb,
- struct hlist_head *head,
- int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *),
- void *data)
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode(sb, head, test, data);
+ spin_unlock(&inode_hash_lock);
+
+ if (inode) {
+ wait_on_inode(inode);
+ return inode;
+ }
+
inode = alloc_inode(sb);
if (inode) {
struct inode *old;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode(sb, head, test, data);
if (!old) {
if (set(inode, data))
goto set_failed;
- hlist_add_head(&inode->i_hash, head);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- spin_unlock(&inode_lock);
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
+ spin_unlock(&inode_hash_lock);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -915,7 +1003,7 @@ static struct inode *get_new_inode(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
inode = old;
wait_on_inode(inode);
@@ -923,33 +1011,53 @@ static struct inode *get_new_inode(struct super_block *sb,
return inode;
set_failed:
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
return NULL;
}
+EXPORT_SYMBOL(iget5_locked);
-/*
- * get_new_inode_fast is the fast path version of get_new_inode, see the
- * comment at iget_locked for details.
+/**
+ * iget_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @ino: inode number to get
+ *
+ * Search for the inode specified by @ino in the inode cache and if present
+ * return it with an increased reference count. This is for file systems
+ * where the inode number is sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
*/
-static struct inode *get_new_inode_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino);
+ spin_unlock(&inode_hash_lock);
+ if (inode) {
+ wait_on_inode(inode);
+ return inode;
+ }
+
inode = alloc_inode(sb);
if (inode) {
struct inode *old;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode_fast(sb, head, ino);
if (!old) {
inode->i_ino = ino;
- hlist_add_head(&inode->i_hash, head);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- spin_unlock(&inode_lock);
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
+ spin_unlock(&inode_hash_lock);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -962,13 +1070,14 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
inode = old;
wait_on_inode(inode);
}
return inode;
}
+EXPORT_SYMBOL(iget_locked);
/*
* search the inode cache for a matching inode number.
@@ -983,10 +1092,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
struct hlist_node *node;
struct inode *inode;
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(inode, node, b, i_hash) {
- if (inode->i_ino == ino && inode->i_sb == sb)
+ if (inode->i_ino == ino && inode->i_sb == sb) {
+ spin_unlock(&inode_hash_lock);
return 0;
+ }
}
+ spin_unlock(&inode_hash_lock);
return 1;
}
@@ -1016,7 +1129,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
static unsigned int counter;
ino_t res;
- spin_lock(&inode_lock);
spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
@@ -1024,7 +1136,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
res = counter++;
} while (!test_inode_iunique(sb, res));
spin_unlock(&iunique_lock);
- spin_unlock(&inode_lock);
return res;
}
@@ -1032,116 +1143,50 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
- spin_lock(&inode_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
__iget(inode);
- else
+ spin_unlock(&inode->i_lock);
+ } else {
+ spin_unlock(&inode->i_lock);
/*
* Handle the case where s_op->clear_inode has not been
* called yet, and somebody is calling igrab
* while the inode is getting freed.
*/
inode = NULL;
- spin_unlock(&inode_lock);
+ }
return inode;
}
EXPORT_SYMBOL(igrab);
/**
- * ifind - internal function, you want ilookup5() or iget5().
- * @sb: super block of file system to search
- * @head: the head of the list to search
- * @test: callback used for comparisons between inodes
- * @data: opaque data pointer to pass to @test
- * @wait: if true wait for the inode to be unlocked, if false do not
- *
- * ifind() searches for the inode specified by @data in the inode
- * cache. This is a generalized version of ifind_fast() for file systems where
- * the inode number is not sufficient for unique identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- *
- * Note, @test is called with the inode_lock held, so can't sleep.
- */
-static struct inode *ifind(struct super_block *sb,
- struct hlist_head *head, int (*test)(struct inode *, void *),
- void *data, const int wait)
-{
- struct inode *inode;
-
- spin_lock(&inode_lock);
- inode = find_inode(sb, head, test, data);
- if (inode) {
- spin_unlock(&inode_lock);
- if (likely(wait))
- wait_on_inode(inode);
- return inode;
- }
- spin_unlock(&inode_lock);
- return NULL;
-}
-
-/**
- * ifind_fast - internal function, you want ilookup() or iget().
- * @sb: super block of file system to search
- * @head: head of the list to search
- * @ino: inode number to search for
- *
- * ifind_fast() searches for the inode @ino in the inode cache. This is for
- * file systems where the inode number is sufficient for unique identification
- * of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- */
-static struct inode *ifind_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
-{
- struct inode *inode;
-
- spin_lock(&inode_lock);
- inode = find_inode_fast(sb, head, ino);
- if (inode) {
- spin_unlock(&inode_lock);
- wait_on_inode(inode);
- return inode;
- }
- spin_unlock(&inode_lock);
- return NULL;
-}
-
-/**
* ilookup5_nowait - search for an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
+ * Search for the inode specified by @hashval and @data in the inode cache.
* If the inode is in the cache, the inode is returned with an incremented
- * reference count. Note, the inode lock is not waited upon so you have to be
- * very careful what you do with the returned inode. You probably should be
- * using ilookup5() instead.
+ * reference count.
*
- * Otherwise NULL is returned.
+ * Note: I_NEW is not waited upon so you have to be very careful what you do
+ * with the returned inode. You probably should be using ilookup5() instead.
*
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode;
+
+ spin_lock(&inode_hash_lock);
+ inode = find_inode(sb, head, test, data);
+ spin_unlock(&inode_hash_lock);
- return ifind(sb, head, test, data, 0);
+ return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
@@ -1152,24 +1197,24 @@ EXPORT_SYMBOL(ilookup5_nowait);
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode lock is waited upon and the inode is
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if the inode is in the cache, wait until the I_NEW state has
+ * cleared (see unlock_new_inode()); the inode is then
* returned with an incremented reference count.
*
- * Otherwise NULL is returned.
+ * This is a generalized version of ilookup() for file systems where the
+ * inode number is not sufficient for unique identification of an inode.
*
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
- return ifind(sb, head, test, data, 1);
+ if (inode)
+ wait_on_inode(inode);
+ return inode;
}
EXPORT_SYMBOL(ilookup5);
@@ -1178,91 +1223,23 @@ EXPORT_SYMBOL(ilookup5);
* @sb: super block of file system to search
* @ino: inode number to search for
*
- * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
- * This is for file systems where the inode number is sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
+ * Search for the inode @ino in the inode cache, and if the inode is in the
+ * cache, the inode is returned with an incremented reference count.
*/
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
-
- return ifind_fast(sb, head, ino);
-}
-EXPORT_SYMBOL(ilookup);
-
-/**
- * iget5_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @hashval: hash value (usually inode number) to get
- * @test: callback used for comparisons between inodes
- * @set: callback used to initialize a new struct inode
- * @data: opaque data pointer to pass to @test and @set
- *
- * iget5_locked() uses ifind() to search for the inode specified by @hashval
- * and @data in the inode cache and if present it is returned with an increased
- * reference count. This is a generalized version of iget_locked() for file
- * systems where the inode number is not sufficient for unique identification
- * of an inode.
- *
- * If the inode is not in cache, get_new_inode() is called to allocate a new
- * inode and this is returned locked, hashed, and with the I_NEW flag set. The
- * file system gets to fill it in before unlocking it via unlock_new_inode().
- *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
- */
-struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
- int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *), void *data)
-{
- struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
- inode = ifind(sb, head, test, data, 1);
- if (inode)
- return inode;
- /*
- * get_new_inode() will do the right thing, re-trying the search
- * in case it had to block at any point.
- */
- return get_new_inode(sb, head, test, set, data);
-}
-EXPORT_SYMBOL(iget5_locked);
-
-/**
- * iget_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @ino: inode number to get
- *
- * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
- * the inode cache and if present it is returned with an increased reference
- * count. This is for file systems where the inode number is sufficient for
- * unique identification of an inode.
- *
- * If the inode is not in cache, get_new_inode_fast() is called to allocate a
- * new inode and this is returned locked, hashed, and with the I_NEW flag set.
- * The file system gets to fill it in before unlocking it via
- * unlock_new_inode().
- */
-struct inode *iget_locked(struct super_block *sb, unsigned long ino)
-{
- struct hlist_head *head = inode_hashtable + hash(sb, ino);
- struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino);
+ spin_unlock(&inode_hash_lock);
- inode = ifind_fast(sb, head, ino);
if (inode)
- return inode;
- /*
- * get_new_inode_fast() will do the right thing, re-trying the search
- * in case it had to block at any point.
- */
- return get_new_inode_fast(sb, head, ino);
+ wait_on_inode(inode);
+ return inode;
}
-EXPORT_SYMBOL(iget_locked);
+EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
@@ -1270,27 +1247,33 @@ int insert_inode_locked(struct inode *inode)
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
- inode->i_state |= I_NEW;
while (1) {
struct hlist_node *node;
struct inode *old = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(old, node, head, i_hash) {
if (old->i_ino != ino)
continue;
if (old->i_sb != sb)
continue;
- if (old->i_state & (I_FREEING|I_WILL_FREE))
+ spin_lock(&old->i_lock);
+ if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ spin_unlock(&old->i_lock);
continue;
+ }
break;
}
if (likely(!node)) {
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
return 0;
}
__iget(old);
- spin_unlock(&inode_lock);
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
@@ -1307,29 +1290,34 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
struct super_block *sb = inode->i_sb;
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
- inode->i_state |= I_NEW;
-
while (1) {
struct hlist_node *node;
struct inode *old = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(old, node, head, i_hash) {
if (old->i_sb != sb)
continue;
if (!test(old, data))
continue;
- if (old->i_state & (I_FREEING|I_WILL_FREE))
+ spin_lock(&old->i_lock);
+ if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ spin_unlock(&old->i_lock);
continue;
+ }
break;
}
if (likely(!node)) {
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
return 0;
}
__iget(old);
- spin_unlock(&inode_lock);
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
@@ -1374,47 +1362,35 @@ static void iput_final(struct inode *inode)
const struct super_operations *op = inode->i_sb->s_op;
int drop;
+ WARN_ON(inode->i_state & I_NEW);
+
if (op && op->drop_inode)
drop = op->drop_inode(inode);
else
drop = generic_drop_inode(inode);
+ if (!drop && (sb->s_flags & MS_ACTIVE)) {
+ inode->i_state |= I_REFERENCED;
+ if (!(inode->i_state & (I_DIRTY|I_SYNC)))
+ inode_lru_list_add(inode);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
if (!drop) {
- if (sb->s_flags & MS_ACTIVE) {
- inode->i_state |= I_REFERENCED;
- if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
- inode_lru_list_add(inode);
- }
- spin_unlock(&inode_lock);
- return;
- }
- WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_WILL_FREE;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
- __remove_inode_hash(inode);
}
- WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
inode_lru_list_del(inode);
- list_del_init(&inode->i_wb_list);
+ spin_unlock(&inode->i_lock);
- __inode_sb_list_del(inode);
- spin_unlock(&inode_lock);
evict(inode);
- remove_inode_hash(inode);
- wake_up_inode(inode);
- BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
- destroy_inode(inode);
}
/**
@@ -1431,7 +1407,7 @@ void iput(struct inode *inode)
if (inode) {
BUG_ON(inode->i_state & I_CLEAR);
- if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+ if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
iput_final(inode);
}
}
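The final reference drop is now tied to inode->i_lock: atomic_dec_and_lock() decrements the count and, only on the transition to zero, returns nonzero with the spinlock held, so iput_final() always runs under i_lock. A sketch of that contract (generic, illustrative):

#include <linux/spinlock.h>

static void put_obj(atomic_t *count, spinlock_t *lock)
{
	/* nonzero only on the 1 -> 0 transition, and then *lock is held */
	if (atomic_dec_and_lock(count, lock)) {
		/* last reference: tear-down would go here, under the lock */
		spin_unlock(lock);
	}
}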
@@ -1610,9 +1586,8 @@ EXPORT_SYMBOL(inode_wait);
* to recheck inode state.
*
* It doesn't matter if I_NEW is not set initially, a call to
- * wake_up_inode() after removing from the hash list will DTRT.
- *
- * This is called with inode_lock held.
+ * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
+ * will DTRT.
*/
static void __wait_on_freeing_inode(struct inode *inode)
{
@@ -1620,10 +1595,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
wq = bit_waitqueue(&inode->i_state, __I_NEW);
prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
schedule();
finish_wait(wq, &wait.wait);
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
@@ -1715,7 +1691,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
EXPORT_SYMBOL(init_special_inode);
/**
- * Init uid,gid,mode for new inode according to posix standards
+ * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
* @inode: New inode
* @dir: Directory inode
* @mode: mode of the new inode
@@ -1733,3 +1709,22 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
+
+/**
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+ * Return true if current either owns the file or holds CAP_FOWNER
+ * in the user namespace of the inode.
+ */
+bool inode_owner_or_capable(const struct inode *inode)
+{
+ struct user_namespace *ns = inode_userns(inode);
+
+ if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
+ return true;
+ if (ns_capable(ns, CAP_FOWNER))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(inode_owner_or_capable);
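This is the helper that the hfsplus, jffs2 and jfs ioctl hunks earlier in this diff switch to from is_owner_or_cap(). A sketch of the typical guard it enables (the function and its flow are invented for illustration):

#include <linux/errno.h>
#include <linux/fs.h>

static long example_ioctl_setflags(struct file *filp, unsigned int flags)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	if (!inode_owner_or_capable(inode))
		return -EACCES;	/* neither the owner nor CAP_FOWNER */
	/* ... apply the flags ... */
	return 0;
}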
diff --git a/fs/internal.h b/fs/internal.h
index 1719154..b29c46e 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -64,6 +64,7 @@ extern int copy_mount_string(const void __user *, char **);
extern unsigned int mnt_get_count(struct vfsmount *mnt);
extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+extern struct vfsmount *lookup_mnt(struct path *);
extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
struct vfsmount *);
extern void release_mounts(struct list_head *);
@@ -124,6 +125,13 @@ extern long do_handle_open(int mountdirfd,
/*
* inode.c
*/
+extern spinlock_t inode_sb_list_lock;
+
+/*
+ * fs-writeback.c
+ */
+extern void inode_wb_list_del(struct inode *inode);
+
extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1eebeb7..1d9b9fc 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -548,6 +548,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
{
int error = 0;
int __user *argp = (int __user *)arg;
+ struct inode *inode = filp->f_path.dentry->d_inode;
switch (cmd) {
case FIOCLEX:
@@ -567,13 +568,11 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break;
case FIOQSIZE:
- if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) ||
- S_ISREG(filp->f_path.dentry->d_inode->i_mode) ||
- S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) {
- loff_t res =
- inode_get_bytes(filp->f_path.dentry->d_inode);
- error = copy_to_user((loff_t __user *)arg, &res,
- sizeof(res)) ? -EFAULT : 0;
+ if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) {
+ loff_t res = inode_get_bytes(inode);
+ error = copy_to_user(argp, &res, sizeof(res)) ?
+ -EFAULT : 0;
} else
error = -ENOTTY;
break;
@@ -590,14 +589,10 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
return ioctl_fiemap(filp, arg);
case FIGETBSZ:
- {
- struct inode *inode = filp->f_path.dentry->d_inode;
- int __user *p = (int __user *)arg;
- return put_user(inode->i_sb->s_blocksize, p);
- }
+ return put_user(inode->i_sb->s_blocksize, argp);
default:
- if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
+ if (S_ISREG(inode->i_mode))
error = file_ioctl(filp, cmd, arg);
else
error = vfs_ioctl(filp, cmd, arg);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index a0f3833..3db5ba4 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
- .sync_page = block_sync_page,
.bmap = _isofs_bmap
};
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861..69b1804 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
+#include <linux/blkdev.h>
/*
* Default IO end handler for temporary BJ_IO buffer_heads.
@@ -294,7 +295,7 @@ void journal_commit_transaction(journal_t *journal)
int first_tag = 0;
int tag_flag;
int i;
- int write_op = WRITE_SYNC;
+ struct blk_plug plug;
/*
* First job: lock down the current transaction and wait for
@@ -327,13 +328,6 @@ void journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
- /*
- * Use plugged writes here, since we want to submit several before
- * we unplug the device. We don't do explicit unplugging in here,
- * instead we rely on sync_buffer() doing the unplug for us.
- */
- if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) {
DEFINE_WAIT(wait);
@@ -368,7 +362,7 @@ void journal_commit_transaction(journal_t *journal)
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple journal_get_write_access() calls to the same
- * buffer are perfectly permissable.
+ * buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -418,8 +412,10 @@ void journal_commit_transaction(journal_t *journal)
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
+ blk_start_plug(&plug);
err = journal_submit_data_buffers(journal, commit_transaction,
- write_op);
+ WRITE_SYNC);
+ blk_finish_plug(&plug);
/*
* Wait for all previously submitted IO to complete.
@@ -480,7 +476,9 @@ void journal_commit_transaction(journal_t *journal)
err = 0;
}
- journal_write_revoke_records(journal, commit_transaction, write_op);
+ blk_start_plug(&plug);
+
+ journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
/*
* If we found any dirty or locked buffers, then we should have
@@ -650,7 +648,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(write_op, bh);
+ submit_bh(WRITE_SYNC, bh);
}
cond_resched();
@@ -661,6 +659,8 @@ start_journal_io:
}
}
+ blk_finish_plug(&plug);
+
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index eb11601..b3713af 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -770,7 +770,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
@@ -831,7 +831,7 @@ journal_t * journal_init_inode (struct inode *inode)
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d290183..305a907 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -71,7 +71,7 @@
* switching hash tables under them. For operations on the lists of entries in
* the hash table j_revoke_lock is used.
*
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, the replay code also uses the hash tables, but at this moment no one else
* can touch them (filesystem isn't mounted yet) and hence no locking is
* needed.
*/
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 5b2e4c3..60d2319 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1392,7 +1392,7 @@ int journal_stop(handle_t *handle)
* by 30x or more...
*
* We try and optimize the sleep time against what the underlying disk
- * can do, instead of having a static sleep time. This is usefull for
+ * can do, instead of having a static sleep time. This is useful for
* the case where our storage is so fast that it is more optimal to go
* ahead and force a flush and wait for the transaction to be committed
* than it is to wait for an arbitrary amount of time for new writers to
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad159..6e28000 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
int ret;
struct timespec now = current_kernel_time();
+ *cbh = NULL;
+
if (is_journal_aborted(journal))
return 0;
@@ -137,9 +139,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
- ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+ ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else
- ret = submit_bh(WRITE_SYNC_PLUG, bh);
+ ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh;
return ret;
@@ -329,7 +331,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
int tag_bytes = journal_tag_bytes(journal);
struct buffer_head *cbh = NULL; /* For transactional checksums */
__u32 crc32_sum = ~0;
- int write_op = WRITE_SYNC;
+ struct blk_plug plug;
/*
* First job: lock down the current transaction and wait for
@@ -363,13 +365,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
- /*
- * Use plugged writes here, since we want to submit several before
- * we unplug the device. We don't do explicit unplugging in here,
- * instead we rely on sync_buffer() doing the unplug for us.
- */
- if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_locked = jiffies;
@@ -410,7 +405,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple jbd2_journal_get_write_access() calls to the same
- * buffer are perfectly permissable.
+ * buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -469,8 +464,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (err)
jbd2_journal_abort(journal, err);
+ blk_start_plug(&plug);
jbd2_journal_write_revoke_records(journal, commit_transaction,
- write_op);
+ WRITE_SYNC);
+ blk_finish_plug(&plug);
jbd_debug(3, "JBD: commit phase 2\n");
@@ -497,6 +494,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
err = 0;
descriptor = NULL;
bufs = 0;
+ blk_start_plug(&plug);
while (commit_transaction->t_buffers) {
/* Find the next buffer to be journaled... */
@@ -658,7 +656,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(write_op, bh);
+ submit_bh(WRITE_SYNC, bh);
}
cond_resched();
stats.run.rs_blocks_logged += bufs;
@@ -699,6 +697,8 @@ start_journal_io:
__jbd2_journal_abort_hard(journal);
}
+ blk_finish_plug(&plug);
+
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
@@ -808,7 +808,7 @@ wait_for_iobuf:
if (err)
__jbd2_journal_abort_hard(journal);
}
- if (!err && !is_journal_aborted(journal))
+ if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
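journal_submit_commit_record() has early-return paths (for example an aborted journal) that used to leave *cbh untouched, forcing the caller to infer validity from error state; initialising *cbh to NULL up front lets the caller simply test the pointer, as the new "if (cbh)" check does. The general shape of the rule, with invented names:

#include <linux/errno.h>
#include <linux/kernel.h>

struct record;
struct record *alloc_record(void);	/* hypothetical allocator */

static int make_record(int aborted, struct record **out)
{
	*out = NULL;		/* defined on every path, including early returns */
	if (aborted)
		return 0;	/* *out stays NULL */
	*out = alloc_record();
	return *out ? 0 : -ENOMEM;
}

A caller can then write "if (rec) wait_for(rec);" without re-deriving the submit path's outcome.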
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 90407b8..e0ec3db 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -917,7 +917,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
@@ -983,7 +983,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
if (!new_dev)
return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+ bd = bdget(device);
spin_lock(&devname_cache_lock);
if (devcache[i]) {
if (devcache[i]->device == device) {
kfree(new_dev);
+ bdput(bd);
ret = devcache[i]->devname;
spin_unlock(&devname_cache_lock);
return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
}
devcache[i] = new_dev;
devcache[i]->device = device;
- bd = bdget(device);
if (bd) {
bdevname(bd, devcache[i]->devname);
bdput(bd);
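bdget() can sleep (it may allocate an inode), so it must not be called with devname_cache_lock held; the hunk hoists it ahead of spin_lock() and balances it with bdput() on the path that loses the lookup race. A sketch of the rule with invented types, doing all the sleeping work before the lock:

#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct name_slot {
	spinlock_t lock;
	dev_t device;
	char name[BDEVNAME_SIZE];
};

static void cache_device_name(struct name_slot *slot, dev_t device)
{
	struct block_device *bd = bdget(device);	/* may sleep: do it first */
	char buf[BDEVNAME_SIZE] = "";

	if (bd) {
		bdevname(bd, buf);
		bdput(bd);
	}
	spin_lock(&slot->lock);		/* only non-sleeping work from here */
	slot->device = device;
	strlcpy(slot->name, buf, sizeof(slot->name));
	spin_unlock(&slot->lock);
}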
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 9ad321f..69fd935 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -71,7 +71,7 @@
* switching hash tables under them. For operations on the lists of entries in
* the hash table j_revoke_lock is used.
*
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, the replay code also uses the hash tables, but at this moment no one else
* can touch them (filesystem isn't mounted yet) and hence no locking is
* needed.
*/
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 1d11910..05fa77a 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1403,7 +1403,7 @@ int jbd2_journal_stop(handle_t *handle)
/*
* Once we drop t_updates, if it goes to zero the transaction
- * could start commiting on us and eventually disappear. So
+ * could start committing on us and eventually disappear. So
* once we do this, we must not dereference transaction
* pointer again.
*/
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
index 5d3ea40..ca28964 100644
--- a/fs/jffs2/TODO
+++ b/fs/jffs2/TODO
@@ -11,7 +11,7 @@
- checkpointing (do we need this? scan is quite fast)
- make the scan code populate real inodes so read_inode just after
mount doesn't have to read the flash twice for large files.
- Make this a per-inode option, changable with chattr, so you can
+ Make this a per-inode option, changeable with chattr, so you can
decide which inodes should be in-core immediately after mount.
- test, test, test
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 95b7967..828a0e1 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -402,7 +402,7 @@ static int jffs2_acl_setxattr(struct dentry *dentry, const char *name,
if (name[0] != '\0')
return -EINVAL;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (value) {
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index fd05a0b..5a00102 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -40,12 +40,13 @@ static z_stream inf_strm, def_strm;
static int __init alloc_workspaces(void)
{
- def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ MAX_MEM_LEVEL));
if (!def_strm.workspace) {
- printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize());
+ printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
return -ENOMEM;
}
- D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize()));
+ D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)));
inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (!inf_strm.workspace) {
printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index d32ee94..2ab1a0d 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -24,7 +24,7 @@
*
* Returns: 0 if the data CRC is correct;
* 1 - if incorrect;
- * error code if an error occured.
+ * error code if an error occurred.
*/
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 800171d..e537fb0 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -121,7 +121,7 @@ int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
temp->nodetype = ri->nodetype;
temp->inode = ri->ino;
temp->version = ri->version;
- temp->offset = cpu_to_je32(ofs); /* relative offset from the begining of the jeb */
+ temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
temp->totlen = ri->totlen;
temp->next = NULL;
@@ -139,7 +139,7 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r
temp->nodetype = rd->nodetype;
temp->totlen = rd->totlen;
- temp->offset = cpu_to_je32(ofs); /* relative from the begining of the jeb */
+ temp->offset = cpu_to_je32(ofs); /* relative from the beginning of the jeb */
temp->pino = rd->pino;
temp->version = rd->version;
temp->ino = rd->ino;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 07ee154..4515bea 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1116,7 +1116,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
/*
* On NAND we try to mark this block bad. If the block was erased more
- * than MAX_ERASE_FAILURES we mark it finaly bad.
+ * than MAX_ERASE_FAILURES we mark it finally bad.
* Don't care about failures. This block remains on the erase-pending
* or badblock list as long as nobody manipulates the flash with
* a bootloader or something like that.
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 4f9cc04..3e93cdd 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -31,7 +31,7 @@
* is used to release xattr name/value pair and detach from c->xattrindex.
* reclaim_xattr_datum(c)
* is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
- * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
+ * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
* is hard coded as 32KiB.
* do_verify_xattr_datum(c, xd)
* is used to load the xdatum information without name/value pair from the medium.
diff --git a/fs/jfs/Makefile b/fs/jfs/Makefile
index 3adb639..a58fa72 100644
--- a/fs/jfs/Makefile
+++ b/fs/jfs/Makefile
@@ -13,4 +13,4 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
-EXTRA_CFLAGS += -D_JFS_4K
+ccflags-y := -D_JFS_4K
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9978803..eddbb37 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
.readpages = jfs_readpages,
.writepage = jfs_writepage,
.writepages = jfs_writepages,
- .sync_page = block_sync_page,
.write_begin = jfs_write_begin,
.write_end = nobh_write_end,
.bmap = jfs_bmap,
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index afe222b..6f98a18 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -72,7 +72,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
return err;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setflags_out;
}
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c92ea3b..4496872 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1649,7 +1649,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
}
/* search the tree within the dmap control page for
- * sufficent free space. if sufficient free space is found,
+ * sufficient free space. if sufficient free space is found,
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
@@ -2744,7 +2744,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
/* check which (leafno or buddy) is the left buddy.
* the left buddy gets to claim the blocks resulting
* from the join while the right gets to claim none.
- * the left buddy is also eligable to participate in
+ * the left buddy is also eligible to participate in
* a join at the next higher level while the right
* is not.
*
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5d3bbd1..e5fe850 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
/* allocate the disk blocks for the extent. initially, extBalloc()
* will try to allocate disk blocks for the requested size (xlen).
- * if this fails (xlen contiguous free blocks not avaliable), it'll
+ * if this fails (xlen contiguous free blocks not available), it'll
* try to allocate a smaller number of blocks (producing a smaller
* extent), with this smaller number of blocks consisting of the
* requested number of blocks rounded down to the next smaller
@@ -481,7 +481,7 @@ int extFill(struct inode *ip, xad_t * xp)
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
- * contiguous free blocks not avaliable), we'll try to allocate
+ * contiguous free blocks not available), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
@@ -575,7 +575,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* to a new set of blocks. If moving the extent, we initially
* will try to allocate disk blocks for the requested size
* (newnblks). if this fails (new contiguous free blocks not
- * avaliable), we'll try to allocate a smaller number of
+ * available), we'll try to allocate a smaller number of
* blocks (producing a smaller extent), with this smaller
* number of blocks consisting of the requested number of
* blocks rounded down to the next smaller power of 2
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 3a09423..ed53a47 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1069,7 +1069,7 @@ int diFree(struct inode *ip)
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
/* in preparation for removing the iag from the
- * ag extent free list, read the iags preceeding
+ * ag extent free list, read the iags preceding
* and following the iag on the ag extent free
* list.
*/
@@ -1095,7 +1095,7 @@ int diFree(struct inode *ip)
int inofreefwd = le32_to_cpu(iagp->inofreefwd);
/* in preparation for removing the iag from the
- * ag inode free list, read the iags preceeding
+ * ag inode free list, read the iags preceding
* and following the iag on the ag inode free
* list. before reading these iags, we must make
* sure that we already don't have them in hand
@@ -1681,7 +1681,7 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
* try to allocate a new extent of free inodes.
*/
if (addext) {
- /* if free space is not avaliable for this new extent, try
+ /* if free space is not available for this new extent, try
* below to allocate a free and existing (already backed)
* inode from the ag.
*/
@@ -2036,7 +2036,7 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
/* check if this is the last free inode within the iag.
* if so, it will have to be removed from the ag free
- * inode list, so get the iags preceeding and following
+ * inode list, so get the iags preceding and following
* it on the list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
@@ -2208,7 +2208,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
/* check if this is the last free extent within the
* iag. if so, the iag must be removed from the ag
- * free extent list, so get the iags preceeding and
+ * free extent list, so get the iags preceding and
* following the iag on this list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
@@ -2504,7 +2504,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
}
- /* get the next avaliable iag number */
+ /* get the next available iag number */
iagno = imap->im_nextiag;
/* make sure that we have not exceeded the maximum inode
@@ -2615,7 +2615,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
duplicateIXtree(sb, blkno, xlen, &xaddr);
- /* update the next avaliable iag number */
+ /* update the next available iag number */
imap->im_nextiag += 1;
/* Add the iag to the iag free list so we don't lose the iag
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 9236bc4..e38c215 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -288,7 +288,7 @@ struct lrd {
/*
* SYNCPT: log sync point
*
- * replay log upto syncpt address specified;
+ * replay log up to syncpt address specified;
*/
struct {
__le32 sync; /* 4: syncpt address (0 = here) */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 48b44bd..6740d34 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.writepage = metapage_writepage,
- .sync_page = block_sync_page,
.releasepage = metapage_releasepage,
.invalidatepage = metapage_invalidatepage,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index d94f8d9..a78beda 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -75,7 +75,7 @@ extern void grab_metapage(struct metapage *);
extern void force_metapage(struct metapage *);
/*
- * hold_metapage and put_metapage are used in conjuction. The page lock
+ * hold_metapage and put_metapage are used in conjunction. The page lock
* is not dropped between the two, so no other threads can get or release
* the metapage
*/
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 9466957..f6cc0c0 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -636,7 +636,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
* the inode of the page and available to all anonymous
* transactions until txCommit() time at which point
* they are transferred to the transaction tlock list of
- * the commiting transaction of the inode)
+ * the committing transaction of the inode)
*/
if (xtid == 0) {
tlck->tid = tid;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 1aba003..8ea5efb 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -57,7 +57,7 @@
* 2. compute new FSCKSize from new LVSize;
* 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
* assert(new FSSize >= old FSSize),
- * i.e., file system must not be shrinked;
+ * i.e., file system must not be shrunk;
*/
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
@@ -182,7 +182,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
*/
newFSSize = newLVSize - newLogSize - newFSCKSize;
- /* file system cannot be shrinked */
+ /* file system cannot be shrunk */
if (newFSSize < bmp->db_mapsize) {
rc = -EINVAL;
goto out;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index eeca48a..06c8a67 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -644,7 +644,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 3fa4c322..24838f1 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -678,7 +678,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
struct posix_acl *acl;
int rc;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/*
diff --git a/fs/locks.c b/fs/locks.c
index 822c3d1..0a4f50d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -414,17 +414,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
- switch (l->l_type) {
- case F_RDLCK:
- case F_WRLCK:
- case F_UNLCK:
- fl->fl_type = l->l_type;
- break;
- default:
- return -EINVAL;
- }
-
- return (0);
+ return assign_type(fl, l->l_type);
}
#endif
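The hunk above drops an open-coded type switch in favour of the assign_type() helper that already lives earlier in fs/locks.c, so flock-to-POSIX translation and the other lock setup paths share one validator. For reference, a sketch of what that helper checks, reconstructed from the deleted switch:

#include <linux/fs.h>

static int sketch_assign_type(struct file_lock *fl, int type)
{
        switch (type) {
        case F_RDLCK:
        case F_WRLCK:
        case F_UNLCK:
                fl->fl_type = type;
                return 0;
        default:
                return -EINVAL;         /* anything else is a caller bug */
        }
}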
diff --git a/fs/logfs/compr.c b/fs/logfs/compr.c
index 44bbfd2..961f02b 100644
--- a/fs/logfs/compr.c
+++ b/fs/logfs/compr.c
@@ -81,7 +81,7 @@ error:
int __init logfs_compr_init(void)
{
- size_t size = max(zlib_deflate_workspacesize(),
+ size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
stream.workspace = vmalloc(size);
if (!stream.workspace)
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 723bc5b..1adc8d4 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_end_io = request_complete;
submit_bio(rw, &bio);
- generic_unplug_device(bdev_get_queue(bdev));
wait_for_completion(&complete);
return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
}
len = PAGE_ALIGN(len);
__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
- generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 7466e9d..339e17e 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -60,7 +60,7 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
* asynchronous properties. So just to prevent the first implementor of such
* a thing from breaking logfs in 2350, we do the usual pointless dance to
* declare a completion variable and wait for completion before returning
- * from mtd_erase(). What an excercise in futility!
+ * from mtd_erase(). What an exercise in futility!
*/
static void logfs_erase_callback(struct erase_info *ei)
{
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index f9ddf0c..9ed89d1 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -92,7 +92,7 @@ static int beyond_eof(struct inode *inode, loff_t bix)
* so short names (len <= 9) don't even occupy the complete 32bit name
* space. A prime >256 ensures short names quickly spread the 32bit
* name space. Add about 26 for the estimated amount of information
- * of each character and pick a prime nearby, preferrably a bit-sparse
+ * of each character and pick a prime nearby, preferably a bit-sparse
* one.
*/
static u32 hash_32(const char *s, int len, u32 seed)
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index e86376b..c2ad702 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -196,7 +196,7 @@ long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_RDONLY(inode))
return -EROFS;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = get_user(flags, (int __user *)arg);
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 03b8c24..edfea7a 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -293,7 +293,7 @@ static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
-/* called with inode_lock held */
+/* called with inode->i_lock held */
static int logfs_drop_inode(struct inode *inode)
{
struct logfs_super *super = logfs_super(inode->i_sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3dcb3a6..d8d0938 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1616,7 +1616,7 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
err = logfs_write_buf(inode, page, flags);
if (!err && shrink_level(gc_level) == 0) {
/* Rewrite cannot mark the inode dirty but has to
- * write it immediatly.
+ * write it immediately.
* Q: Can't we just create an alias for the inode
* instead? And if not, why not?
*/
diff --git a/fs/mbcache.c b/fs/mbcache.c
index a25444ab..2f174be 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -542,7 +542,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
* mb_cache_entry_find_first()
*
* Find the first cache entry on a given device with a certain key in
- * an additional index. Additonal matches can be found with
+ * an additional index. Additional matches can be found with
* mb_cache_entry_find_next(). Returns NULL if no match was found. The
* returned cache entry is locked for shared access ("multiple readers").
*
diff --git a/fs/minix/Kconfig b/fs/minix/Kconfig
index 0fd7ca9..6624684 100644
--- a/fs/minix/Kconfig
+++ b/fs/minix/Kconfig
@@ -15,3 +15,11 @@ config MINIX_FS
module will be called minix. Note that the file system of your root
partition (the one containing the directory /) cannot be compiled as
a module.
+
+config MINIX_FS_NATIVE_ENDIAN
+ def_bool MINIX_FS
+ depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
+
+config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED
+ def_bool MINIX_FS
+ depends on M68K && MMU
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index ae0b83f..adcdc0a 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = {
.readpage = minix_readpage,
.writepage = minix_writepage,
- .sync_page = block_sync_page,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
.bmap = minix_bmap
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 407b1c8..341e212 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -88,4 +88,78 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
+ defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
+
+#error Minix file system byte order broken
+
+#elif defined(CONFIG_MINIX_FS_NATIVE_ENDIAN)
+
+/*
+ * big-endian 32 or 64 bit indexed bitmaps on big-endian system or
+ * little-endian bitmaps on little-endian system
+ */
+
+#define minix_test_and_set_bit(nr, addr) \
+ __test_and_set_bit((nr), (unsigned long *)(addr))
+#define minix_set_bit(nr, addr) \
+ __set_bit((nr), (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) \
+ __test_and_clear_bit((nr), (unsigned long *)(addr))
+#define minix_test_bit(nr, addr) \
+ test_bit((nr), (unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr, size) \
+ find_first_zero_bit((unsigned long *)(addr), (size))
+
+#elif defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
+
+/*
+ * big-endian 16bit indexed bitmaps
+ */
+
+static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
+{
+ const unsigned short *p = vaddr, *addr = vaddr;
+ unsigned short num;
+
+ if (!size)
+ return 0;
+
+ size = (size >> 4) + ((size & 15) > 0);
+ while (*p++ == 0xffff) {
+ if (--size == 0)
+ return (p - addr) << 4;
+ }
+
+ num = *--p;
+ return ((p - addr) << 4) + ffz(num);
+}
+
+#define minix_test_and_set_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_set_bit(nr, addr) \
+ __set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
+
+static inline int minix_test_bit(int nr, const void *vaddr)
+{
+ const unsigned short *p = vaddr;
+ return (p[nr >> 4] & (1U << (nr & 15))) != 0;
+}
+
+#else
+
+/*
+ * little-endian bitmaps
+ */
+
+#define minix_test_and_set_bit __test_and_set_bit_le
+#define minix_set_bit __set_bit_le
+#define minix_test_and_clear_bit __test_and_clear_bit_le
+#define minix_test_bit test_bit_le
+#define minix_find_first_zero_bit find_first_zero_bit_le
+
+#endif
+
#endif /* FS_MINIX_H */
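The CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED branch treats a Minix bitmap as an array of big-endian 16-bit words, which is why the m68k macros flip bit numbers with nr ^ 16 when delegating to the native 32-bit bitops. A freestanding user-space model of the same addressing, assuming only the word layout described in the header above (all names here are illustrative):

#include <stdio.h>

static int minix16_test_bit(int nr, const unsigned short *map)
{
        /* bit nr lives in halfword nr/16, at position nr%16 */
        return (map[nr >> 4] & (1U << (nr & 15))) != 0;
}

static int minix16_find_first_zero(const unsigned short *map, unsigned size)
{
        unsigned i, b;

        for (i = 0; i < (size + 15) / 16; i++) {
                if (map[i] == 0xffff)
                        continue;               /* halfword is full */
                for (b = 0; b < 16; b++)
                        if (!(map[i] & (1U << b)))
                                return i * 16 + b;
        }
        return size;                            /* no zero bit found */
}

int main(void)
{
        unsigned short map[2] = { 0xffff, 0x00ff };     /* bits 0-23 set */

        printf("first zero: %d\n", minix16_find_first_zero(map, 32)); /* 24 */
        printf("bit 5: %d\n", minix16_test_bit(5, map));              /* 1 */
        return 0;
}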
diff --git a/fs/mpage.c b/fs/mpage.c
index d78455a..0afc809 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -364,6 +364,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
map_bh.b_state = 0;
map_bh.b_size = 0;
@@ -385,6 +388,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
BUG_ON(!list_empty(pages));
if (bio)
mpage_bio_submit(READ, bio);
+ blk_finish_plug(&plug);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -666,8 +670,11 @@ int
mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block)
{
+ struct blk_plug plug;
int ret;
+ blk_start_plug(&plug);
+
if (!get_block)
ret = generic_writepages(mapping, wbc);
else {
@@ -682,6 +689,7 @@ mpage_writepages(struct address_space *mapping,
if (mpd.bio)
mpage_bio_submit(WRITE, mpd.bio);
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL(mpage_writepages);
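Both mpage paths now bracket their bio submissions with the block layer's per-task plugging API, which replaces the per-queue unplug calls removed elsewhere in this merge: bios accumulate on a plug list and are flushed to the driver in one batch. The bare pattern, with a hypothetical submit_many() caller:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_many(struct bio **bios, int n)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* bios queue on a per-task list */
        for (i = 0; i < n; i++)
                submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);         /* flush the batch to the driver */
}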
diff --git a/fs/namei.c b/fs/namei.c
index 5a9a6c3..54fc993 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -70,7 +70,7 @@
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
- * the name is a symlink pointing to a non-existant name.
+ * the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
@@ -183,6 +183,9 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ if (current_user_ns() != inode_userns(inode))
+ goto other_perms;
+
if (current_fsuid() == inode->i_uid)
mode >>= 6;
else {
@@ -196,6 +199,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag
mode >>= 3;
}
+other_perms:
/*
* If the DACs are ok we don't need any capability check.
*/
@@ -237,7 +241,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
* Executable DACs are overridable if at least one exec bit is set.
*/
if (!(mask & MAY_EXEC) || execute_ok(inode))
- if (capable(CAP_DAC_OVERRIDE))
+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
return 0;
/*
@@ -245,7 +249,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
- if (capable(CAP_DAC_READ_SEARCH))
+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
@@ -654,6 +658,7 @@ static inline int handle_reval_path(struct nameidata *nd)
static inline int exec_permission(struct inode *inode, unsigned int flags)
{
int ret;
+ struct user_namespace *ns = inode_userns(inode);
if (inode->i_op->permission) {
ret = inode->i_op->permission(inode, MAY_EXEC, flags);
@@ -666,7 +671,8 @@ static inline int exec_permission(struct inode *inode, unsigned int flags)
if (ret == -ECHILD)
return ret;
- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
+ if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
+ ns_capable(ns, CAP_DAC_READ_SEARCH))
goto ok;
return ret;
@@ -691,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
do {
seq = read_seqcount_begin(&fs->seq);
nd->root = fs->root;
+ nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
}
}
@@ -986,6 +993,12 @@ int follow_down_one(struct path *path)
return 0;
}
+static inline bool managed_dentry_might_block(struct dentry *dentry)
+{
+ return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
+ dentry->d_op->d_manage(dentry, true) < 0);
+}
+
/*
* Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we
* meet a managed dentry and we're not walking to "..". True is returned to
@@ -994,19 +1007,26 @@ int follow_down_one(struct path *path)
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode, bool reverse_transit)
{
- while (d_mountpoint(path->dentry)) {
+ for (;;) {
struct vfsmount *mounted;
- if (unlikely(path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
- !reverse_transit &&
- path->dentry->d_op->d_manage(path->dentry, true) < 0)
+ /*
+ * Don't forget we might have a non-mountpoint managed dentry
+ * that wants to block transit.
+ */
+ *inode = path->dentry->d_inode;
+ if (!reverse_transit &&
+ unlikely(managed_dentry_might_block(path->dentry)))
return false;
+
+ if (!d_mountpoint(path->dentry))
+ break;
+
mounted = __lookup_mnt(path->mnt, path->dentry, 1);
if (!mounted)
break;
path->mnt = mounted;
path->dentry = mounted->mnt_root;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
- *inode = path->dentry->d_inode;
}
if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
@@ -1644,13 +1664,16 @@ static int path_lookupat(int dfd, const char *name,
err = -ECHILD;
}
- if (!err)
+ if (!err) {
err = handle_reval_path(nd);
+ if (err)
+ path_put(&nd->path);
+ }
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
- return -ENOTDIR;
+ err = -ENOTDIR;
}
}
@@ -1842,11 +1865,15 @@ static inline int check_sticky(struct inode *dir, struct inode *inode)
if (!(dir->i_mode & S_ISVTX))
return 0;
+ if (current_user_ns() != inode_userns(inode))
+ goto other_userns;
if (inode->i_uid == fsuid)
return 0;
if (dir->i_uid == fsuid)
return 0;
- return !capable(CAP_FOWNER);
+
+other_userns:
+ return !ns_capable(inode_userns(inode), CAP_FOWNER);
}
/*
@@ -2026,7 +2053,7 @@ static int may_open(struct path *path, int acc_mode, int flag)
}
/* O_NOATIME can only be set by the owner or superuser */
- if (flag & O_NOATIME && !is_owner_or_cap(inode))
+ if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
/*
@@ -2440,7 +2467,8 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
- if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
+ if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
+ !ns_capable(inode_userns(dir), CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
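Throughout these namei.c hunks the pattern is the same: the owner-uid shortcut is trusted only when the caller shares the inode's user namespace, and capability checks are retargeted at that namespace through ns_capable(). Condensed into one hypothetical helper (inode_userns(), current_user_ns() and ns_capable() are the interfaces the patch actually uses):

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/fs.h>

static int sketch_priv_check(struct inode *inode, int cap)
{
        /* the uid comparison is only meaningful inside the inode's ns */
        if (current_user_ns() == inode_userns(inode) &&
            current_fsuid() == inode->i_uid)
                return 0;
        /* otherwise require the capability in the inode's namespace */
        return ns_capable(inode_userns(inode), cap) ? 0 : -EPERM;
}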
diff --git a/fs/namespace.c b/fs/namespace.c
index 9263995..d99bcf5 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
.show = show_vfsmnt
};
-static int uuid_is_nil(u8 *uuid)
-{
- int i;
- u8 *cp = (u8 *)uuid;
-
- for (i = 0; i < 16; i++) {
- if (*cp++)
- return 0;
- }
- return 1;
-}
-
static int show_mountinfo(struct seq_file *m, void *v)
{
struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
if (IS_MNT_UNBINDABLE(mnt))
seq_puts(m, " unbindable");
- if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
- /* print the uuid */
- seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
/* Filesystem specific data */
seq_puts(m, " - ");
show_type(m, sb);
@@ -2701,7 +2685,7 @@ void __init mnt_init(void)
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
- printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+ printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mount_hashtable[u]);
diff --git a/fs/ncpfs/Makefile b/fs/ncpfs/Makefile
index 68ea095..c66af56 100644
--- a/fs/ncpfs/Makefile
+++ b/fs/ncpfs/Makefile
@@ -11,6 +11,6 @@ ncpfs-$(CONFIG_NCPFS_EXTRAS) += symlink.o
ncpfs-$(CONFIG_NCPFS_NFS_NS) += symlink.o
# If you want debugging output, please uncomment the following line
-# EXTRA_CFLAGS += -DDEBUG_NCP=1
+# ccflags-y := -DDEBUG_NCP=1
CFLAGS_ncplib_kernel.o := -finline-functions
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 00a1d1c..0250e4c 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -596,7 +596,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
/* server->priv.data = NULL; */
server->m = data;
- /* Althought anything producing this is buggy, it happens
+ /* Although anything producing this is buggy, it happens
now because of PATH_MAX changes.. */
if (server->m.time_out < 1) {
server->m.time_out = 10;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 14e0f93..00ecf62 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -241,7 +241,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
args->cbl_layout_type = ntohl(*p++);
/* Depite the spec's xdr, iomode really belongs in the FILE switch,
- * as it is unuseable and ignored with the other types.
+ * as it is unusable and ignored with the other types.
*/
iomode = ntohl(*p++);
args->cbl_layoutchanged = ntohl(*p++);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index abdf38d..7237672 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -44,6 +44,7 @@
/* #define NFS_DEBUG_VERBOSE 1 */
static int nfs_opendir(struct inode *, struct file *);
+static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
@@ -64,7 +65,7 @@ const struct file_operations nfs_dir_operations = {
.read = generic_read_dir,
.readdir = nfs_readdir,
.open = nfs_opendir,
- .release = nfs_release,
+ .release = nfs_closedir,
.fsync = nfs_fsync_dir,
};
@@ -133,13 +134,35 @@ const struct inode_operations nfs4_dir_inode_operations = {
#endif /* CONFIG_NFS_V4 */
+static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct rpc_cred *cred)
+{
+ struct nfs_open_dir_context *ctx;
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx != NULL) {
+ ctx->duped = 0;
+ ctx->dir_cookie = 0;
+ ctx->dup_cookie = 0;
+ ctx->cred = get_rpccred(cred);
+ } else
+ ctx = ERR_PTR(-ENOMEM);
+ return ctx;
+}
+
+static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
+{
+ put_rpccred(ctx->cred);
+ kfree(ctx);
+}
+
/*
* Open file
*/
static int
nfs_opendir(struct inode *inode, struct file *filp)
{
- int res;
+ int res = 0;
+ struct nfs_open_dir_context *ctx;
+ struct rpc_cred *cred;
dfprintk(FILE, "NFS: open dir(%s/%s)\n",
filp->f_path.dentry->d_parent->d_name.name,
@@ -147,8 +170,15 @@ nfs_opendir(struct inode *inode, struct file *filp)
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
- /* Call generic open code in order to cache credentials */
- res = nfs_open(inode, filp);
+ cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return PTR_ERR(cred);
+ ctx = alloc_nfs_open_dir_context(cred);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto out;
+ }
+ filp->private_data = ctx;
if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
/* This is a mountpoint, so d_revalidate will never
* have been called, so we need to refresh the
@@ -156,9 +186,18 @@ nfs_opendir(struct inode *inode, struct file *filp)
*/
__nfs_revalidate_inode(NFS_SERVER(inode), inode);
}
+out:
+ put_rpccred(cred);
return res;
}
+static int
+nfs_closedir(struct inode *inode, struct file *filp)
+{
+ put_nfs_open_dir_context(filp->private_data);
+ return 0;
+}
+
struct nfs_cache_array_entry {
u64 cookie;
u64 ino;
@@ -284,19 +323,20 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
{
loff_t diff = desc->file->f_pos - desc->current_index;
unsigned int index;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
if (diff < 0)
goto out_eof;
if (diff >= array->size) {
if (array->eof_index >= 0)
goto out_eof;
- desc->current_index += array->size;
return -EAGAIN;
}
index = (unsigned int)diff;
*desc->dir_cookie = array->array[index].cookie;
desc->cache_entry_index = index;
+ ctx->duped = 0;
return 0;
out_eof:
desc->eof = 1;
@@ -307,10 +347,18 @@ static
int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
{
int i;
+ loff_t new_pos;
int status = -EAGAIN;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == *desc->dir_cookie) {
+ new_pos = desc->current_index + i;
+ if (new_pos < desc->file->f_pos) {
+ ctx->dup_cookie = *desc->dir_cookie;
+ ctx->duped = 1;
+ }
+ desc->file->f_pos = new_pos;
desc->cache_entry_index = i;
return 0;
}
@@ -342,6 +390,7 @@ int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
if (status == -EAGAIN) {
desc->last_cookie = array->last_cookie;
+ desc->current_index += array->size;
desc->page_index++;
}
nfs_readdir_release_array(desc->page);
@@ -354,7 +403,8 @@ static
int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
struct nfs_entry *entry, struct file *file, struct inode *inode)
{
- struct rpc_cred *cred = nfs_file_cred(file);
+ struct nfs_open_dir_context *ctx = file->private_data;
+ struct rpc_cred *cred = ctx->cred;
unsigned long timestamp, gencount;
int error;
@@ -693,6 +743,20 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
int i = 0;
int res = 0;
struct nfs_cache_array *array = NULL;
+ struct nfs_open_dir_context *ctx = file->private_data;
+
+ if (ctx->duped != 0 && ctx->dup_cookie == *desc->dir_cookie) {
+ if (printk_ratelimit()) {
+ pr_notice("NFS: directory %s/%s contains a readdir loop. "
+ "Please contact your server vendor. "
+ "Offending cookie: %llu\n",
+ file->f_dentry->d_parent->d_name.name,
+ file->f_dentry->d_name.name,
+ *desc->dir_cookie);
+ }
+ res = -ELOOP;
+ goto out;
+ }
array = nfs_readdir_get_array(desc->page);
if (IS_ERR(array)) {
@@ -785,6 +849,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *inode = dentry->d_inode;
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
int res;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
@@ -801,7 +866,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
memset(desc, 0, sizeof(*desc));
desc->file = filp;
- desc->dir_cookie = &nfs_file_open_context(filp)->dir_cookie;
+ desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = NFS_USE_READDIRPLUS(inode);
@@ -853,6 +918,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
dentry->d_parent->d_name.name,
@@ -872,7 +938,8 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
- nfs_file_open_context(filp)->dir_cookie = 0;
+ dir_ctx->dir_cookie = 0;
+ dir_ctx->duped = 0;
}
out:
mutex_unlock(&inode->i_mutex);
@@ -1068,7 +1135,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (fhandle == NULL || fattr == NULL)
goto out_error;
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1224,7 +1291,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
@@ -1562,7 +1629,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
if (dentry->d_inode)
goto out;
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_error;
}
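nfs_opendir()/nfs_closedir() above move per-reader state (dir_cookie, dup_cookie, duped) out of the shared nfs_open_context and into a context hung off filp->private_data, one per open directory. The lifecycle reduced to its shape (sketch_opendir() is hypothetical; the alloc/put helpers are the file-local ones added in the hunk):

static int sketch_opendir(struct inode *inode, struct file *filp)
{
        struct nfs_open_dir_context *ctx;
        struct rpc_cred *cred;

        cred = rpc_lookup_cred();
        if (IS_ERR(cred))
                return PTR_ERR(cred);
        ctx = alloc_nfs_open_dir_context(cred); /* takes its own cred ref */
        put_rpccred(cred);                      /* drop the lookup ref */
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        filp->private_data = ctx;       /* freed by ->release via put_... */
        return 0;
}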
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index d85a534..2f093ed 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -301,7 +301,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
* disk, but it retrieves and clears ctx->error after synching, despite
* the two being set at the same time in nfs_context_set_write_error().
* This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occured, and hence cause it to
+ * nfs_file_write() that a write error occurred, and hence cause it to
* fall back to doing a synchronous write.
*/
static int
@@ -326,6 +326,9 @@ nfs_file_fsync(struct file *file, int datasync)
ret = xchg(&ctx->error, 0);
if (!ret && status < 0)
ret = status;
+ if (!ret && !datasync)
+ /* application has asked for meta-data sync */
+ ret = pnfs_layoutcommit_inode(inode, true);
return ret;
}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 1084792..dcb6154 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -222,6 +222,10 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
goto out;
}
+ if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
inode = nfs_fhget(sb, mntfh, fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 01768e5..57bb31a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -254,7 +254,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
struct inode *inode = ERR_PTR(-ENOENT);
unsigned long hash;
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
+ nfs_attr_check_mountpoint(sb, fattr);
+
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
@@ -298,8 +300,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
- if ((fattr->valid & NFS_ATTR_FATTR_FSID)
- && !nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
+ fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
inode->i_op = &nfs_referral_inode_operations;
else
@@ -639,7 +641,6 @@ struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cr
ctx->mode = f_mode;
ctx->flags = 0;
ctx->error = 0;
- ctx->dir_cookie = 0;
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
@@ -1471,6 +1472,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
nfsi->layout = NULL;
+ atomic_set(&nfsi->commits_outstanding, 0);
#endif
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 72e0bdd..ce118ce 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -39,6 +39,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
return 0;
}
+static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
+{
+ if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
+ fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -214,6 +220,7 @@ extern const u32 nfs41_maxwrite_overhead;
/* nfs4proc.c */
#ifdef CONFIG_NFS_V4
extern struct rpc_procinfo nfs4_procedures[];
+void nfs_fixup_secinfo_attributes(struct nfs_fattr *, struct nfs_fh *);
#endif
extern int nfs4_init_ds_session(struct nfs_client *clp);
@@ -276,11 +283,25 @@ extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
+extern void nfs_commit_free(struct nfs_write_data *p);
extern int nfs_initiate_write(struct nfs_write_data *data,
struct rpc_clnt *clnt,
const struct rpc_call_ops *call_ops,
int how);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how);
+extern void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg);
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg);
+void nfs_commit_clear_lock(struct nfs_inode *nfsi);
+void nfs_commitdata_release(void *data);
+void nfs_commit_release_pages(struct nfs_write_data *data);
+
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -296,12 +317,14 @@ extern int nfs4_init_client(struct nfs_client *clp,
rpc_authflavor_t authflavour,
int noresvport);
extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
-extern int _nfs4_call_sync(struct nfs_server *server,
+extern int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
-extern int _nfs4_call_sync_session(struct nfs_server *server,
+extern int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index c0b8344..89fc160 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>
#include <linux/vfs.h>
+#include <linux/sunrpc/gss_api.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -27,7 +28,8 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
static struct vfsmount *nfs_do_submount(struct dentry *dentry,
struct nfs_fh *fh,
- struct nfs_fattr *fattr);
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor);
/*
* nfs_path - reconstruct the path given an arbitrary dentry
@@ -98,7 +100,7 @@ rename_retry:
namelen--;
buflen -= namelen;
if (buflen < 0) {
- spin_lock(&dentry->d_lock);
+ spin_unlock(&dentry->d_lock);
rcu_read_unlock();
goto Elong;
}
@@ -108,7 +110,7 @@ rename_retry:
rcu_read_unlock();
return end;
Elong_unlock:
- spin_lock(&dentry->d_lock);
+ spin_unlock(&dentry->d_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
@@ -116,6 +118,99 @@ Elong:
return ERR_PTR(-ENAMETOOLONG);
}
+#ifdef CONFIG_NFS_V4
+static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors, struct inode *inode)
+{
+ struct gss_api_mech *mech;
+ struct xdr_netobj oid;
+ int i;
+ rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+ for (i = 0; i < flavors->num_flavors; i++) {
+ struct nfs4_secinfo_flavor *flavor;
+ flavor = &flavors->flavors[i];
+
+ if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+ pseudoflavor = flavor->flavor;
+ break;
+ } else if (flavor->flavor == RPC_AUTH_GSS) {
+ oid.len = flavor->gss.sec_oid4.len;
+ oid.data = flavor->gss.sec_oid4.data;
+ mech = gss_mech_get_by_OID(&oid);
+ if (!mech)
+ continue;
+ pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+ gss_mech_put(mech);
+ break;
+ }
+ }
+
+ return pseudoflavor;
+}
+
+static int nfs_negotiate_security(const struct dentry *parent,
+ const struct dentry *dentry,
+ rpc_authflavor_t *flavor)
+{
+ struct page *page;
+ struct nfs4_secinfo_flavors *flavors;
+ int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+ int ret = -EPERM;
+
+ secinfo = NFS_PROTO(parent->d_inode)->secinfo;
+ if (secinfo != NULL) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ flavors = page_address(page);
+ ret = secinfo(parent->d_inode, &dentry->d_name, flavors);
+ *flavor = nfs_find_best_sec(flavors, dentry->d_inode);
+ put_page(page);
+ }
+
+out:
+ return ret;
+}
+
+static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
+ struct dentry *dentry, struct path *path,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ rpc_authflavor_t *flavor)
+{
+ struct rpc_clnt *clone;
+ struct rpc_auth *auth;
+ int err;
+
+ err = nfs_negotiate_security(parent, path->dentry, flavor);
+ if (err < 0)
+ goto out;
+ clone = rpc_clone_client(server->client);
+ auth = rpcauth_create(*flavor, clone);
+ if (!auth) {
+ err = -EIO;
+ goto out_shutdown;
+ }
+ err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
+ &path->dentry->d_name,
+ fh, fattr);
+out_shutdown:
+ rpc_shutdown_client(clone);
+out:
+ return err;
+}
+#else /* CONFIG_NFS_V4 */
+static inline int nfs_lookup_with_sec(struct nfs_server *server,
+ struct dentry *parent, struct dentry *dentry,
+ struct path *path, struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t *flavor)
+{
+ return -EPERM;
+}
+#endif /* CONFIG_NFS_V4 */
+
/*
* nfs_d_automount - Handle crossing a mountpoint on the server
* @path - The mountpoint
@@ -136,6 +231,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
int err;
+ rpc_authflavor_t flavor = RPC_AUTH_UNIX;
dprintk("--> nfs_d_automount()\n");
@@ -153,9 +249,11 @@ struct vfsmount *nfs_d_automount(struct path *path)
/* Look it up again to get its attributes */
parent = dget_parent(path->dentry);
- err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+ err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode,
&path->dentry->d_name,
fh, fattr);
+ if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL)
+ err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor);
dput(parent);
if (err != 0) {
mnt = ERR_PTR(err);
@@ -165,7 +263,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
mnt = nfs_do_refmount(path->dentry);
else
- mnt = nfs_do_submount(path->dentry, fh, fattr);
+ mnt = nfs_do_submount(path->dentry, fh, fattr, flavor);
if (IS_ERR(mnt))
goto out;
@@ -232,17 +330,20 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @dentry - parent directory
* @fh - filehandle for new root dentry
* @fattr - attributes for new root inode
+ * @authflavor - security flavor to use when performing the mount
*
*/
static struct vfsmount *nfs_do_submount(struct dentry *dentry,
struct nfs_fh *fh,
- struct nfs_fattr *fattr)
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor)
{
struct nfs_clone_mount mountdata = {
.sb = dentry->d_sb,
.dentry = dentry,
.fh = fh,
.fattr = fattr,
+ .authflavor = authflavor,
};
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
char *page = (char *) __get_free_page(GFP_USER);
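nfs_find_best_sec() encodes a simple preference order: the first AUTH_NULL or AUTH_UNIX flavor the server offers wins outright; failing that, the first GSS triple whose mechanism OID the client recognises is mapped to a pseudoflavor; AUTH_UNIX is the fallback when nothing matches. Restated as a compressed sketch (GSS resolution elided; sketch_pick_flavor() is illustrative only):

static rpc_authflavor_t
sketch_pick_flavor(struct nfs4_secinfo_flavors *flavors)
{
        int i;

        for (i = 0; i < flavors->num_flavors; i++) {
                struct nfs4_secinfo_flavor *f = &flavors->flavors[i];

                if (f->flavor == RPC_AUTH_NULL || f->flavor == RPC_AUTH_UNIX)
                        return f->flavor;       /* first sys/null flavor wins */
                /* RPC_AUTH_GSS entries resolve through
                 * gss_mech_get_by_OID() + gss_svc_to_pseudoflavor(),
                 * exactly as in the hunk above */
        }
        return RPC_AUTH_UNIX;                   /* nothing usable offered */
}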
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d0c80d8..38053d8 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -141,7 +141,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs3_diropargs arg = {
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c64be1c..e1c261d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -57,7 +57,8 @@ enum nfs4_session_state {
struct nfs4_minor_version_ops {
u32 minor_version;
- int (*call_sync)(struct nfs_server *server,
+ int (*call_sync)(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -262,6 +263,8 @@ extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
+extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
+ bool sync);
static inline bool
is_ds_only_client(struct nfs_client *clp)
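Threading a struct rpc_clnt through ->call_sync (and through ->lookup in the hunks above) is what lets nfs_lookup_with_sec() issue a lookup over a cloned client carrying the negotiated flavor, while every ordinary caller keeps passing the server's default client. A hypothetical wrapper showing that common case against the _nfs4_call_sync() declaration from internal.h:

static int sketch_call_sync_default(struct nfs_server *server,
                                    struct rpc_message *msg,
                                    struct nfs4_sequence_args *args,
                                    struct nfs4_sequence_res *res)
{
        /* ordinary callers just forward the server's default client */
        return _nfs4_call_sync(server->client, server, msg, args, res, 0);
}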
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 4285584..6f8192f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -154,6 +154,23 @@ static int filelayout_read_done_cb(struct rpc_task *task,
}
/*
+ * We reference the rpc_cred of the first WRITE that triggers the need for
+ * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
+ * rfc5661 is not clear about which credential should be used.
+ */
+static void
+filelayout_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+ wdata->res.verf->committed == NFS_FILE_SYNC)
+ return;
+
+ pnfs_set_layoutcommit(wdata);
+ dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
+ (unsigned long) wdata->lseg->pls_end_pos);
+}
+
+/*
* Call ops for the async read/write cases
* In the case of dense layouts, the offset needs to be reset to its
* original value.
@@ -210,6 +227,38 @@ static int filelayout_write_done_cb(struct rpc_task *task,
return -EAGAIN;
}
+ filelayout_set_layoutcommit(data);
+ return 0;
+}
+
+/* Fake up some data that will cause nfs_commit_release to retry the writes. */
+static void prepare_to_resend_writes(struct nfs_write_data *data)
+{
+ struct nfs_page *first = nfs_list_entry(data->pages.next);
+
+ data->task.tk_status = 0;
+ memcpy(data->verf.verifier, first->wb_verf.verifier,
+ sizeof(first->wb_verf.verifier));
+ data->verf.verifier[0]++; /* ensure verifier mismatch */
+}
+
+static int filelayout_commit_done_cb(struct rpc_task *task,
+ struct nfs_write_data *data)
+{
+ int reset = 0;
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ prepare_to_resend_writes(data);
+ filelayout_set_lo_fail(data->lseg);
+ } else
+ nfs_restart_rpc(task, data->ds_clp);
+ return -EAGAIN;
+ }
+
return 0;
}
@@ -240,6 +289,16 @@ static void filelayout_write_release(void *data)
wdata->mds_ops->rpc_release(data);
}
+static void filelayout_commit_release(void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ nfs_commit_release_pages(wdata);
+ if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
+ nfs_commit_clear_lock(NFS_I(wdata->inode));
+ nfs_commitdata_release(wdata);
+}
+
struct rpc_call_ops filelayout_read_call_ops = {
.rpc_call_prepare = filelayout_read_prepare,
.rpc_call_done = filelayout_read_call_done,
@@ -252,6 +311,12 @@ struct rpc_call_ops filelayout_write_call_ops = {
.rpc_release = filelayout_write_release,
};
+struct rpc_call_ops filelayout_commit_call_ops = {
+ .rpc_call_prepare = filelayout_write_prepare,
+ .rpc_call_done = filelayout_write_call_done,
+ .rpc_release = filelayout_commit_release,
+};
+
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
@@ -320,10 +385,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
data->inode->i_ino, sync, (size_t) data->args.count, offset,
ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
- /* We can't handle commit to ds yet */
- if (!FILELAYOUT_LSEG(lseg)->commit_through_mds)
- data->args.stable = NFS_FILE_SYNC;
-
data->write_done_cb = filelayout_write_done_cb;
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
@@ -441,12 +502,33 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
struct nfs4_layoutget_res *lgr,
struct nfs4_deviceid *id)
{
- uint32_t *p = (uint32_t *)lgr->layout.buf;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = lgr->layoutp->pages,
+ .page_len = lgr->layoutp->len,
+ .buflen = lgr->layoutp->len,
+ .len = lgr->layoutp->len,
+ };
+ struct page *scratch;
+ __be32 *p;
uint32_t nfl_util;
int i;
dprintk("%s: set_layout_map Begin\n", __func__);
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
+ * num_fh (4) */
+ p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
+ if (unlikely(!p))
+ goto out_err;
+
memcpy(id, p, sizeof(*id));
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
print_deviceid(id);
@@ -468,32 +550,57 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
__func__, nfl_util, fl->num_fh, fl->first_stripe_index,
fl->pattern_offset);
+ if (!fl->num_fh)
+ goto out_err;
+
fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
GFP_KERNEL);
if (!fl->fh_array)
- return -ENOMEM;
+ goto out_err;
for (i = 0; i < fl->num_fh; i++) {
/* Do we want to use a mempool here? */
fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
- if (!fl->fh_array[i]) {
- filelayout_free_fh_array(fl);
- return -ENOMEM;
- }
+ if (!fl->fh_array[i])
+ goto out_err_free;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free;
fl->fh_array[i]->size = be32_to_cpup(p++);
if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
printk(KERN_ERR "Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
- filelayout_free_fh_array(fl);
- return -EIO;
+ goto out_err_free;
}
+
+ p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
+ if (unlikely(!p))
+ goto out_err_free;
memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
- p += XDR_QUADLEN(fl->fh_array[i]->size);
dprintk("DEBUG: %s: fh len %d\n", __func__,
fl->fh_array[i]->size);
}
+ __free_page(scratch);
return 0;
+
+out_err_free:
+ filelayout_free_fh_array(fl);
+out_err:
+ __free_page(scratch);
+ return -EIO;
+}
+
+static void
+filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+
+ dprintk("--> %s\n", __func__);
+ nfs4_fl_put_deviceid(fl->dsaddr);
+ kfree(fl->commit_buckets);
+ _filelayout_free_lseg(fl);
}
static struct pnfs_layout_segment *
@@ -514,17 +621,28 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
_filelayout_free_lseg(fl);
return NULL;
}
- return &fl->generic_hdr;
-}
-static void
-filelayout_free_lseg(struct pnfs_layout_segment *lseg)
-{
- struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
-
- dprintk("--> %s\n", __func__);
- nfs4_fl_put_deviceid(fl->dsaddr);
- _filelayout_free_lseg(fl);
+ /* This assumes there is only one IOMODE_RW lseg. What
+ * we really want to do is have a layout_hdr level
+ * dictionary of <multipath_list4, fh> keys, each
+ * associated with a struct list_head, populated by calls
+ * to filelayout_write_pagelist().
+ * */
+ if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
+ int i;
+ int size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL);
+ if (!fl->commit_buckets) {
+ filelayout_free_lseg(&fl->generic_hdr);
+ return NULL;
+ }
+ fl->number_of_buckets = size;
+ for (i = 0; i < size; i++)
+ INIT_LIST_HEAD(&fl->commit_buckets[i]);
+ }
+ return &fl->generic_hdr;
}
/*
@@ -552,6 +670,191 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
return (p_stripe == r_stripe);
}
+static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
+{
+ return !FILELAYOUT_LSEG(lseg)->commit_through_mds;
+}
+
+static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
+{
+ if (fl->stripe_type == STRIPE_SPARSE)
+ return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
+ else
+ return j;
+}
+
+struct list_head *filelayout_choose_commit_list(struct nfs_page *req)
+{
+ struct pnfs_layout_segment *lseg = req->wb_commit_lseg;
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ u32 i, j;
+ struct list_head *list;
+
+ /* Note that we are calling nfs4_fl_calc_j_index on each page
+ * that ends up being committed to a data server. An attractive
+ * alternative is to add a field to nfs_write_data and nfs_page
+ * to store the value calculated in filelayout_write_pagelist
+ * and just use that here.
+ */
+ j = nfs4_fl_calc_j_index(lseg,
+ (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+ i = select_bucket_index(fl, j);
+ list = &fl->commit_buckets[i];
+ if (list_empty(list)) {
+ /* Non-empty buckets hold a reference on the lseg */
+ get_lseg(lseg);
+ }
+ return list;
+}
+
+static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE)
+ return i;
+ else
+ return nfs4_fl_calc_ds_index(lseg, i);
+}
+
+static struct nfs_fh *
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE) {
+ if (flseg->num_fh == 1)
+ i = 0;
+ else if (flseg->num_fh == 0)
+ /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
+ return NULL;
+ }
+ return flseg->fh_array[i];
+}
+
+static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ u32 idx;
+ struct nfs_fh *fh;
+
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ prepare_to_resend_writes(data);
+ data->mds_ops->rpc_release(data);
+ return -EAGAIN;
+ }
+ dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
+ data->write_done_cb = filelayout_commit_done_cb;
+ data->ds_clp = ds->ds_clp;
+ fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ if (fh)
+ data->args.fh = fh;
+ return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_commit_call_ops, how);
+}
+
+/*
+ * This is only useful while we are using whole file layouts.
+ */
+static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
+{
+ struct pnfs_layout_segment *lseg, *rv = NULL;
+
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ rv = get_lseg(lseg);
+ spin_unlock(&inode->i_lock);
+ return rv;
+}
+
+static int alloc_ds_commits(struct inode *inode, struct list_head *list)
+{
+ struct pnfs_layout_segment *lseg;
+ struct nfs4_filelayout_segment *fl;
+ struct nfs_write_data *data;
+ int i, j;
+
+ /* Won't need this when non-whole file layout segments are supported
+ * instead we will use a pnfs_layout_hdr structure */
+ lseg = find_only_write_lseg(inode);
+ if (!lseg)
+ return 0;
+ fl = FILELAYOUT_LSEG(lseg);
+ for (i = 0; i < fl->number_of_buckets; i++) {
+ if (list_empty(&fl->commit_buckets[i]))
+ continue;
+ data = nfs_commitdata_alloc();
+ if (!data)
+ goto out_bad;
+ data->ds_commit_index = i;
+ data->lseg = lseg;
+ list_add(&data->pages, list);
+ }
+ put_lseg(lseg);
+ return 0;
+
+out_bad:
+ for (j = i; j < fl->number_of_buckets; j++) {
+ if (list_empty(&fl->commit_buckets[i]))
+ continue;
+ nfs_retry_commit(&fl->commit_buckets[i], lseg);
+ put_lseg(lseg); /* associated with emptying bucket */
+ }
+ put_lseg(lseg);
+ /* Caller will clean up entries put on list */
+ return -ENOMEM;
+}
+
+/* This follows nfs_commit_list pretty closely */
+static int
+filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how)
+{
+ struct nfs_write_data *data, *tmp;
+ LIST_HEAD(list);
+
+ if (!list_empty(mds_pages)) {
+ data = nfs_commitdata_alloc();
+ if (!data)
+ goto out_bad;
+ data->lseg = NULL;
+ list_add(&data->pages, &list);
+ }
+
+ if (alloc_ds_commits(inode, &list))
+ goto out_bad;
+
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ list_del_init(&data->pages);
+ atomic_inc(&NFS_I(inode)->commits_outstanding);
+ if (!data->lseg) {
+ nfs_init_commit(data, mds_pages, NULL);
+ nfs_initiate_commit(data, NFS_CLIENT(inode),
+ data->mds_ops, how);
+ } else {
+ nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index], data->lseg);
+ filelayout_initiate_commit(data, how);
+ }
+ }
+ return 0;
+ out_bad:
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ nfs_retry_commit(&data->pages, data->lseg);
+ list_del_init(&data->pages);
+ nfs_commit_free(data);
+ }
+ nfs_retry_commit(mds_pages, NULL);
+ nfs_commit_clear_lock(NFS_I(inode));
+ return -ENOMEM;
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
@@ -559,6 +862,9 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_test = filelayout_pg_test,
+ .mark_pnfs_commit = filelayout_mark_pnfs_commit,
+ .choose_commit_list = filelayout_choose_commit_list,
+ .commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
};
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index ee0c907..7c44579 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,7 +33,7 @@
#include "pnfs.h"
/*
- * Field testing shows we need to support upto 4096 stripe indices.
+ * Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
* RFC 5661 multipath_list4 structures.
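In concrete terms: 4096 indices at one u8 apiece is 4 KiB of index data per device, and since each stored index is a u8 it can name at most 2^8 = 256 distinct entries, which is where the 256 multipath_list4 limit comes from.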
@@ -79,6 +79,8 @@ struct nfs4_filelayout_segment {
struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
unsigned int num_fh;
struct nfs_fh **fh_array;
+ struct list_head *commit_buckets; /* Sort commits to ds */
+ int number_of_buckets;
};
static inline struct nfs4_filelayout_segment *
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 68143c1..de5350f 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -261,7 +261,7 @@ out:
* Currently only supports ipv4, and one multi-path address.
*/
static struct nfs4_pnfs_ds *
-decode_and_add_ds(__be32 **pp, struct inode *inode)
+decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
{
struct nfs4_pnfs_ds *ds = NULL;
char *buf;
@@ -269,25 +269,34 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
u32 ip_addr, port;
int nlen, rlen, i;
int tmp[2];
- __be32 *r_netid, *r_addr, *p = *pp;
+ __be32 *p;
/* r_netid */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_err;
nlen = be32_to_cpup(p++);
- r_netid = p;
- p += XDR_QUADLEN(nlen);
- /* r_addr */
- rlen = be32_to_cpup(p++);
- r_addr = p;
- p += XDR_QUADLEN(rlen);
- *pp = p;
+ p = xdr_inline_decode(streamp, nlen);
+ if (unlikely(!p))
+ goto out_err;
/* Check that netid is "tcp" */
- if (nlen != 3 || memcmp((char *)r_netid, "tcp", 3)) {
+ if (nlen != 3 || memcmp((char *)p, "tcp", 3)) {
dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
goto out_err;
}
+ /* r_addr */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_err;
+ rlen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(streamp, rlen);
+ if (unlikely(!p))
+ goto out_err;
+
/* ipv6 length plus port is legal */
if (rlen > INET6_ADDRSTRLEN + 8) {
dprintk("%s: Invalid address, length %d\n", __func__,
@@ -300,7 +309,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
goto out_err;
}
buf[rlen] = '\0';
- memcpy(buf, r_addr, rlen);
+ memcpy(buf, p, rlen);
/* replace the port dots with dashes for the in4_pton() delimiter */
for (i = 0; i < 2; i++) {
@@ -336,90 +345,154 @@ out_err:
static struct nfs4_file_layout_dsaddr*
decode_device(struct inode *ino, struct pnfs_device *pdev)
{
- int i, dummy;
+ int i;
u32 cnt, num;
u8 *indexp;
- __be32 *p = (__be32 *)pdev->area, *indicesp;
- struct nfs4_file_layout_dsaddr *dsaddr;
+ __be32 *p;
+ u8 *stripe_indices;
+ u8 max_stripe_index;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = pdev->pages,
+ .page_len = pdev->pglen,
+ .buflen = pdev->pglen,
+ .len = pdev->pglen,
+ };
+ struct page *scratch;
+
+ /* set up xdr stream */
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ goto out_err;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
/* Get the stripe count (number of stripe index) */
- cnt = be32_to_cpup(p++);
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_scratch;
+
+ cnt = be32_to_cpup(p);
dprintk("%s stripe count %d\n", __func__, cnt);
if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
printk(KERN_WARNING "%s: stripe count %d greater than "
"supported maximum %d\n", __func__,
cnt, NFS4_PNFS_MAX_STRIPE_CNT);
- goto out_err;
+ goto out_err_free_scratch;
+ }
+
+ /* read stripe indices */
+ stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL);
+ if (!stripe_indices)
+ goto out_err_free_scratch;
+
+ p = xdr_inline_decode(&stream, cnt << 2);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ indexp = &stripe_indices[0];
+ max_stripe_index = 0;
+ for (i = 0; i < cnt; i++) {
+ *indexp = be32_to_cpup(p++);
+ max_stripe_index = max(max_stripe_index, *indexp);
+ indexp++;
}
/* Check the multipath list count */
- indicesp = p;
- p += XDR_QUADLEN(cnt << 2);
- num = be32_to_cpup(p++);
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ num = be32_to_cpup(p);
dprintk("%s ds_num %u\n", __func__, num);
if (num > NFS4_PNFS_MAX_MULTI_CNT) {
printk(KERN_WARNING "%s: multipath count %d greater than "
"supported maximum %d\n", __func__,
num, NFS4_PNFS_MAX_MULTI_CNT);
- goto out_err;
+ goto out_err_free_stripe_indices;
}
+
+ /* validate stripe indices are all < num */
+ if (max_stripe_index >= num) {
+ printk(KERN_WARNING "%s: stripe index %u >= num ds %u\n",
+ __func__, max_stripe_index, num);
+ goto out_err_free_stripe_indices;
+ }
+
dsaddr = kzalloc(sizeof(*dsaddr) +
(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
GFP_KERNEL);
if (!dsaddr)
- goto out_err;
-
- dsaddr->stripe_indices = kzalloc(sizeof(u8) * cnt, GFP_KERNEL);
- if (!dsaddr->stripe_indices)
- goto out_err_free;
+ goto out_err_free_stripe_indices;
dsaddr->stripe_count = cnt;
+ dsaddr->stripe_indices = stripe_indices;
+ stripe_indices = NULL;
dsaddr->ds_num = num;
memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));
- /* Go back an read stripe indices */
- p = indicesp;
- indexp = &dsaddr->stripe_indices[0];
- for (i = 0; i < dsaddr->stripe_count; i++) {
- *indexp = be32_to_cpup(p++);
- if (*indexp >= num)
- goto out_err_free;
- indexp++;
- }
- /* Skip already read multipath list count */
- p++;
-
for (i = 0; i < dsaddr->ds_num; i++) {
int j;
+ u32 mp_count;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
- dummy = be32_to_cpup(p++); /* multipath count */
- if (dummy > 1) {
+ mp_count = be32_to_cpup(p); /* multipath count */
+ if (mp_count > 1) {
printk(KERN_WARNING
"%s: Multipath count %d not supported, "
"skipping all greater than 1\n", __func__,
- dummy);
+ mp_count);
}
- for (j = 0; j < dummy; j++) {
+ for (j = 0; j < mp_count; j++) {
if (j == 0) {
- dsaddr->ds_list[i] = decode_and_add_ds(&p, ino);
+ dsaddr->ds_list[i] = decode_and_add_ds(&stream,
+ ino);
if (dsaddr->ds_list[i] == NULL)
- goto out_err_free;
+ goto out_err_free_deviceid;
} else {
u32 len;
/* skip extra multipath */
- len = be32_to_cpup(p++);
- p += XDR_QUADLEN(len);
- len = be32_to_cpup(p++);
- p += XDR_QUADLEN(len);
- continue;
+
+ /* read len, skip */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+ len = be32_to_cpup(p);
+
+ p = xdr_inline_decode(&stream, len);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+
+ /* read len, skip */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+ len = be32_to_cpup(p);
+
+ p = xdr_inline_decode(&stream, len);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
}
}
}
+
+ __free_page(scratch);
return dsaddr;
-out_err_free:
+out_err_free_deviceid:
nfs4_fl_free_deviceid(dsaddr);
+ /* stripe_indices was part of dsaddr */
+ goto out_err_free_scratch;
+out_err_free_stripe_indices:
+ kfree(stripe_indices);
+out_err_free_scratch:
+ __free_page(scratch);
out_err:
dprintk("%s ERROR: returning NULL\n", __func__);
return NULL;
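Every read in the converted decoders follows the same two-step xdr_stream idiom: pull the fixed-size XDR length word, then pull the opaque body it describes, bailing out if either step runs past the received data. A minimal sketch of the idiom, using xdr_inline_decode() and be32_to_cpup() as above:

    static int decode_opaque_len(struct xdr_stream *xdr, u32 *lenp)
    {
            __be32 *p;

            p = xdr_inline_decode(xdr, 4);     /* the 4-byte length word */
            if (unlikely(!p))
                    return -EIO;               /* reply shorter than claimed */
            *lenp = be32_to_cpup(p);

            p = xdr_inline_decode(xdr, *lenp); /* advance past the opaque body */
            if (unlikely(!p))
                    return -EIO;
            return 0;
    }

The scratch page registered with xdr_set_scratch_buffer() is what lets xdr_inline_decode() hand back a contiguous pointer even when an item straddles a page boundary in the receive buffer, which is why the vmap()ed area is removed from get_device_info() below.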
@@ -498,11 +571,6 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
goto out_free;
}
- /* set pdev->area */
- pdev->area = vmap(pages, max_pages, VM_MAP, PAGE_KERNEL);
- if (!pdev->area)
- goto out_free;
-
memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
pdev->layout_type = LAYOUT_NFSV4_1_FILES;
pdev->pages = pages;
@@ -521,8 +589,6 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
*/
dsaddr = decode_and_add_device(inode, pdev);
out_free:
- if (pdev->area != NULL)
- vunmap(pdev->area);
for (i = 0; i < max_pages; i++)
__free_page(pages[i]);
kfree(pages);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1d84e70..9bf41ea 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -71,7 +72,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
-static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@@ -85,6 +88,8 @@ static int nfs4_map_errors(int err)
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
+ case -NFS4ERR_WRONGSEC:
+ return -EPERM;
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
@@ -657,7 +662,8 @@ struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
-static int nfs4_call_sync_sequence(struct nfs_server *server,
+static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -673,7 +679,7 @@ static int nfs4_call_sync_sequence(struct nfs_server *server,
.cache_reply = cache_reply,
};
struct rpc_task_setup task_setup = {
- .rpc_client = server->client,
+ .rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
@@ -692,13 +698,14 @@ static int nfs4_call_sync_sequence(struct nfs_server *server,
return ret;
}
-int _nfs4_call_sync_session(struct nfs_server *server,
+int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
- return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
+ return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
#else
@@ -709,19 +716,28 @@ static int nfs4_sequence_done(struct rpc_task *task,
}
#endif /* CONFIG_NFS_V4_1 */
-int _nfs4_call_sync(struct nfs_server *server,
+int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
args->sa_session = res->sr_session = NULL;
- return rpc_call_sync(server->client, msg, 0);
+ return rpc_call_sync(clnt, msg, 0);
}
-#define nfs4_call_sync(server, msg, args, res, cache_reply) \
- (server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
- &(res)->seq_res, (cache_reply))
+static inline
+int nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
+ args, res, cache_reply);
+}
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
@@ -1831,7 +1847,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
@@ -2090,7 +2106,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
};
int status;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
@@ -2160,7 +2176,7 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(info->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -2176,15 +2192,41 @@ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
return err;
}
+static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info, rpc_authflavor_t flavor)
+{
+ struct rpc_auth *auth;
+ int ret;
+
+ auth = rpcauth_create(flavor, server->client);
+ if (IS_ERR(auth)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = nfs4_lookup_root(server, fhandle, info);
+out:
+ return ret;
+}
+
/*
* get the file handle for the "/" directory on the server
*/
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
- int status;
+ int i, len, status = 0;
+ rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS + 2];
- status = nfs4_lookup_root(server, fhandle, info);
+ flav_array[0] = RPC_AUTH_UNIX;
+ len = gss_mech_list_pseudoflavors(&flav_array[1]);
+ flav_array[1+len] = RPC_AUTH_NULL;
+ len += 2;
+
+ for (i = 0; i < len; i++) {
+ status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
+ if (status != -EPERM)
+ break;
+ }
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
if (status == 0)
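The probe order assembled above is AUTH_UNIX, then every GSS pseudoflavor the loaded mechanisms report, then AUTH_NULL; the loop keys on -EPERM because nfs4_map_errors() (earlier in this patch) now maps NFS4ERR_WRONGSEC to -EPERM. As a reading aid:

    /* flav_array after setup:
     *   [0]          RPC_AUTH_UNIX
     *   [1..len-2]   pseudoflavors from gss_mech_list_pseudoflavors()
     *   [len-1]      RPC_AUTH_NULL
     * -EPERM (NFS4ERR_WRONGSEC) means "wrong flavor, try the next";
     * any other status, success included, ends the scan.
     */
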
@@ -2249,7 +2291,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -2309,9 +2351,9 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
return status;
}
-static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh,
- const struct qstr *name, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
+ const struct nfs_fh *dirfh, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct nfs4_lookup_arg args = {
@@ -2333,7 +2375,7 @@ static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *d
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
@@ -2345,7 +2387,7 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
struct nfs4_exception exception = { };
int err;
do {
- err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
+ err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
/* FIXME: !!!! */
if (err == -NFS4ERR_MOVED) {
err = -EREMOTE;
@@ -2356,27 +2398,41 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
return err;
}
-static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
- struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
{
int status;
dprintk("NFS call lookup %s\n", name->name);
- status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
+ status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
if (status == -NFS4ERR_MOVED)
status = nfs4_get_referral(dir, name, fattr, fhandle);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
-static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
+{
+ memset(fh, 0, sizeof(struct nfs_fh));
+ fattr->fsid.major = 1;
+ fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
+ NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
+ fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ fattr->nlink = 2;
+}
+
+static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
- _nfs4_proc_lookup(dir, name, fhandle, fattr),
+ _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
&exception);
+ if (err == -EPERM)
+ nfs_fixup_secinfo_attributes(fattr, fhandle);
} while (exception.retry);
return err;
}
@@ -2421,7 +2477,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
if (res.fattr == NULL)
return -ENOMEM;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
@@ -2488,7 +2544,7 @@ static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
.rpc_resp = &res,
};
- return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
+ return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
@@ -2577,7 +2633,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
if (res.dir_attr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &args, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2678,7 +2734,7 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
@@ -2729,7 +2785,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
if (res.fattr == NULL || res.dir_attr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2792,8 +2848,8 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
- int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
- &data->arg, &data->res, 1);
+ int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
@@ -2905,7 +2961,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
- status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
@@ -2997,7 +3053,7 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(fsstat->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
@@ -3028,7 +3084,7 @@ static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
@@ -3073,7 +3129,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
}
nfs_fattr_init(pathconf->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -3195,12 +3251,9 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
-
- if (!nfs4_sequence_done(task, &data->res.seq_res))
- return -EAGAIN;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
@@ -3210,11 +3263,24 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+ return data->write_done_cb(task, data);
+}
+
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
-
- data->args.bitmask = server->cache_consistency_bitmask;
+
+ if (data->lseg) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ } else
+ data->args.bitmask = server->cache_consistency_bitmask;
+ if (!data->write_done_cb)
+ data->write_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
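Nulling args.bitmask and res.fattr for data-server commits is what drives the matching encoder and decoder changes later in this patch: nfs4_xdr_enc_commit() only emits the trailing GETATTR when args->bitmask is set, and nfs4_xdr_dec_commit() only decodes one when res->fattr is set. In short:

    /* COMMIT sent to a data server (data->lseg != NULL): */
    data->args.bitmask = NULL;  /* enc_commit skips encode_getfattr() */
    data->res.fattr = NULL;     /* dec_commit skips decode_getfattr() */
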
@@ -3452,7 +3518,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
- ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
if (res.acl_len > args.acl_len)
@@ -3527,7 +3593,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
if (i < 0)
return i;
nfs_inode_return_delegation(inode);
- ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
* Free each page after tx, so the only ref left is
@@ -3890,7 +3956,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
arg.lock_owner.s_dev = server->s_dev;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
@@ -4618,12 +4684,46 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
+static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
+{
+ int status;
+ struct nfs4_secinfo_arg args = {
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ };
+ struct nfs4_secinfo_res res = {
+ .flavors = flavors,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ dprintk("NFS call secinfo %s\n", name->name);
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("NFS reply secinfo: %d\n", status);
+ return status;
+}
+
+int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_secinfo(dir, name, flavors),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
#ifdef CONFIG_NFS_V4_1
/*
* Check the exchange flags returned by the server for invalid flags, having
@@ -5516,8 +5616,6 @@ static void nfs4_layoutget_release(void *calldata)
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
- if (lgp->res.layout.buf != NULL)
- free_page((unsigned long) lgp->res.layout.buf);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -5549,12 +5647,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
dprintk("--> %s\n", __func__);
- lgp->res.layout.buf = (void *)__get_free_page(GFP_NOFS);
- if (lgp->res.layout.buf == NULL) {
- nfs4_layoutget_release(lgp);
- return -ENOMEM;
- }
-
+ lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -5586,7 +5679,7 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
int status;
dprintk("--> %s\n", __func__);
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@@ -5606,6 +5699,100 @@ int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
+static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (nfs4_setup_sequence(server, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void
+nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ switch (task->tk_status) { /* Just ignore these failures */
+ case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
+ case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
+ case -NFS4ERR_BADLAYOUT: /* no layout */
+ case -NFS4ERR_GRACE: /* loca_reclaim always false */
+ task->tk_status = 0;
+ }
+
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ nfs_restart_rpc(task, server->nfs_client);
+ return;
+ }
+
+ if (task->tk_status == 0)
+ nfs_post_op_update_inode_force_wcc(data->args.inode,
+ data->res.fattr);
+}
+
+static void nfs4_layoutcommit_release(void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+
+ /* Matched by references in pnfs_set_layoutcommit */
+ put_lseg(data->lseg);
+ put_rpccred(data->cred);
+ kfree(data);
+}
+
+static const struct rpc_call_ops nfs4_layoutcommit_ops = {
+ .rpc_call_prepare = nfs4_layoutcommit_prepare,
+ .rpc_call_done = nfs4_layoutcommit_done,
+ .rpc_release = nfs4_layoutcommit_release,
+};
+
+int
+nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = NFS_CLIENT(data->args.inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutcommit_ops,
+ .callback_data = data,
+ .flags = RPC_TASK_ASYNC,
+ };
+ struct rpc_task *task;
+ int status = 0;
+
+ dprintk("NFS: %4d initiating layoutcommit call. sync %d "
+ "lbw: %llu inode %lu\n",
+ data->task.tk_pid, sync,
+ data->args.lastbytewritten,
+ data->args.inode->i_ino);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (!sync)
+ goto out;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = task->tk_status;
+out:
+ dprintk("%s: status %d\n", __func__, status);
+ rpc_put_task(task);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -5741,6 +5928,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
+ .secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ab1bf5b..a6804f7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -590,7 +590,8 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
- state->inode = igrab(inode);
+ ihold(inode);
+ state->inode = inode;
spin_unlock(&inode->i_lock);
/* Note: The reclaim code dictates that we add stateless
* and read-only stateids to the end of the list */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 0cf560f..dddfb57 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -46,6 +46,7 @@
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -112,7 +113,7 @@ static int nfs4_stat_to_errno(int);
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (encode_getattr_maxsz)
-#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11)
+#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 15)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
#define encode_setclientid_maxsz \
@@ -253,6 +254,8 @@ static int nfs4_stat_to_errno(int);
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
+#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
+#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)))
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
@@ -324,6 +327,18 @@ static int nfs4_stat_to_errno(int);
#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
decode_stateid_maxsz + \
XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
+#define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \
+ 2 /* offset */ + \
+ 2 /* length */ + \
+ 1 /* reclaim */ + \
+ encode_stateid_maxsz + \
+ 1 /* new offset (true) */ + \
+ 2 /* last byte written */ + \
+ 1 /* nt_timechanged (false) */ + \
+ 1 /* layoutupdate4 layout type */ + \
+ 1 /* NULL filelayout layoutupdate4 payload */)
+#define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
+
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -676,6 +691,14 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
+#define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_secinfo_maxsz)
+#define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_secinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_exchange_id_sz \
(compound_encode_hdr_maxsz + \
@@ -727,6 +750,17 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutget_maxsz)
+#define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_putfh_maxsz + \
+ encode_layoutcommit_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutcommit_maxsz + \
+ decode_getattr_maxsz)
+
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
@@ -1620,6 +1654,18 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
hdr->replen += decode_delegreturn_maxsz;
}
+static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ int len = name->len;
+ __be32 *p;
+
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_SECINFO);
+ xdr_encode_opaque(p, name->name, len);
+ hdr->nops++;
+ hdr->replen += decode_secinfo_maxsz;
+}
+
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
static void encode_exchange_id(struct xdr_stream *xdr,
@@ -1816,6 +1862,34 @@ encode_layoutget(struct xdr_stream *xdr,
hdr->nops++;
hdr->replen += decode_layoutget_maxsz;
}
+
+static int
+encode_layoutcommit(struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten,
+ NFS_SERVER(args->inode)->pnfs_curr_ld->id);
+
+ p = reserve_space(xdr, 48 + NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_LAYOUTCOMMIT);
+ /* Only whole file layouts */
+ p = xdr_encode_hyper(p, 0); /* offset */
+ p = xdr_encode_hyper(p, NFS4_MAX_UINT64); /* length */
+ *p++ = cpu_to_be32(0); /* reclaim */
+ p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(1); /* newoffset = TRUE */
+ p = xdr_encode_hyper(p, args->lastbytewritten);
+ *p++ = cpu_to_be32(0); /* Never send time_modify_changed */
+ *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
+ *p++ = cpu_to_be32(0); /* no file layout payload */
+
+ hdr->nops++;
+ hdr->replen += decode_layoutcommit_maxsz;
+ return 0;
+}
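The 48 bytes reserved above, plus the stateid, cover every fixed-size field the function then encodes; as a reading aid (field names from the RFC 5661 LAYOUTCOMMIT4args definition):

    /* reserve_space(xdr, 48 + NFS4_STATEID_SIZE) accounts for:
     *    4  OP_LAYOUTCOMMIT opcode
     *    8  loca_offset (0: whole file)
     *    8  loca_length (NFS4_MAX_UINT64)
     *    4  loca_reclaim (FALSE)
     *   16  loca_stateid (NFS4_STATEID_SIZE)
     *    4  newoffset discriminant (TRUE)
     *    8  last byte written
     *    4  time_modify discriminant (FALSE)
     *    4  layoutupdate4 layout type
     *    4  zero-length layoutupdate4 body
     * = 48 + NFS4_STATEID_SIZE in all
     */
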
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -2294,7 +2368,8 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_commit(xdr, args, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2465,6 +2540,24 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode SECINFO request
+ */
+static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_secinfo(xdr, args->name, &hdr);
+ encode_nops(&hdr);
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* EXCHANGE_ID request
@@ -2604,8 +2697,32 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutget(xdr, args, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
+ args->layout.pages, 0, args->layout.pglen);
+
encode_nops(&hdr);
}
+
+/*
+ * Encode LAYOUTCOMMIT request
+ */
+static int nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutcommit(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -2925,6 +3042,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
if (unlikely(!p))
goto out_overflow;
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
+ return -be32_to_cpup(p);
}
return 0;
out_overflow:
@@ -3912,6 +4030,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
fattr->valid |= status;
status = decode_attr_error(xdr, bitmap);
+ if (status == -NFS4ERR_WRONGSEC) {
+ nfs_fixup_secinfo_attributes(fattr, fh);
+ status = 0;
+ }
if (status < 0)
goto xdr_error;
@@ -4680,6 +4802,73 @@ static int decode_delegreturn(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
+static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.sec_oid4.len = be32_to_cpup(p);
+ if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
+ goto out_err;
+
+ p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.qop4 = be32_to_cpup(p++);
+ flavor->gss.service = be32_to_cpup(p);
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+out_err:
+ return -EINVAL;
+}
+
+static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+ struct nfs4_secinfo_flavor *sec_flavor;
+ int status;
+ __be32 *p;
+ int i;
+
+ status = decode_op_hdr(xdr, OP_SECINFO);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->flavors->num_flavors = be32_to_cpup(p);
+
+ for (i = 0; i < res->flavors->num_flavors; i++) {
+ sec_flavor = &res->flavors->flavors[i];
+ if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE)
+ break;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sec_flavor->flavor = be32_to_cpup(p);
+
+ if (sec_flavor->flavor == RPC_AUTH_GSS) {
+ if (decode_secinfo_gss(xdr, sec_flavor))
+ break;
+ }
+ }
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
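SECINFO entries are variable-length on the wire, which is why the loop above checks the page bound as it decodes rather than validating the count up front:

    /* per-entry wire format (RFC 5661 secinfo4):
     *   flavor<4>                          always
     *   + oid<4+len>, qop<4>, service<4>   only when flavor == RPC_AUTH_GSS
     * so the loop stops once the next nfs4_secinfo_flavor slot would end
     * past the single page assumed to hold res->flavors.
     */
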
#if defined(CONFIG_NFS_V4_1)
static int decode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_res *res)
@@ -4950,6 +5139,9 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
__be32 *p;
int status;
u32 layout_count;
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ struct kvec *iov = rcvbuf->head;
+ u32 hdrlen, recvd;
status = decode_op_hdr(xdr, OP_LAYOUTGET);
if (status)
@@ -4966,17 +5158,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
return -EINVAL;
}
- p = xdr_inline_decode(xdr, 24);
+ p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &res->range.offset);
p = xdr_decode_hyper(p, &res->range.length);
res->range.iomode = be32_to_cpup(p++);
res->type = be32_to_cpup(p++);
-
- status = decode_opaque_inline(xdr, &res->layout.len, (char **)&p);
- if (unlikely(status))
- return status;
+ res->layoutp->len = be32_to_cpup(p);
dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
__func__,
@@ -4984,12 +5173,18 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
(unsigned long)res->range.length,
res->range.iomode,
res->type,
- res->layout.len);
+ res->layoutp->len);
- /* nfs4_proc_layoutget allocated a single page */
- if (res->layout.len > PAGE_SIZE)
- return -ENOMEM;
- memcpy(res->layout.buf, p, res->layout.len);
+ hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (res->layoutp->len > recvd) {
+ dprintk("NFS: server cheating in layoutget reply: "
+ "layout len %u > recvd %u\n",
+ res->layoutp->len, recvd);
+ return -EINVAL;
+ }
+
+ xdr_read_pages(xdr, res->layoutp->len);
if (layout_count > 1) {
/* We only handle a length one array at the moment. Any
@@ -5006,6 +5201,35 @@ out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
+
+static int decode_layoutcommit(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct nfs4_layoutcommit_res *res)
+{
+ __be32 *p;
+ __u32 sizechanged;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sizechanged = be32_to_cpup(p);
+
+ if (sizechanged) {
+ /* throw away new size */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -5723,8 +5947,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_commit(xdr, res);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5919,6 +6144,32 @@ out:
return status;
}
+/*
+ * Decode SECINFO response
+ */
+static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_secinfo(xdr, res);
+ if (status)
+ goto out;
+out:
+ return status;
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* Decode EXCHANGE_ID response
@@ -6066,6 +6317,34 @@ static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
out:
return status;
}
+
+/*
+ * Decode LAYOUTCOMMIT response
+ */
+static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutcommit(xdr, rqstp, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
+out:
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
/**
@@ -6180,10 +6459,6 @@ static struct {
{ NFS4ERR_SYMLINK, -ELOOP },
{ NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_WRONGSEC, -EPERM }, /* FIXME: this needs
- * to be handled by a
- * middle-layer.
- */
{ -1, -EIO }
};
@@ -6258,6 +6533,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+ PROC(SECINFO, enc_secinfo, dec_secinfo),
#if defined(CONFIG_NFS_V4_1)
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
@@ -6267,6 +6543,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 23e7944..c80add6 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -135,14 +135,14 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
nfs_unlock_request(req);
}
-/**
+/*
* nfs_clear_request - Free up all resources allocated to the request
* @req: the request to free
*
* Release page and open context resources associated with a read/write
* request after it has completed.
*/
-void nfs_clear_request(struct nfs_page *req)
+static void nfs_clear_request(struct nfs_page *req)
{
struct page *page = req->wb_page;
struct nfs_open_context *ctx = req->wb_context;
@@ -223,6 +223,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_count = 0;
desc->pg_bsize = bsize;
desc->pg_base = 0;
+ desc->pg_moreio = 0;
desc->pg_inode = inode;
desc->pg_doio = doio;
desc->pg_ioflags = io_flags;
@@ -335,9 +336,11 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
while (!nfs_pageio_do_add_request(desc, req)) {
+ desc->pg_moreio = 1;
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
+ desc->pg_moreio = 0;
}
return 1;
}
@@ -395,6 +398,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
pgoff_t idx_end;
int found, i;
int res;
+ struct list_head *list;
res = 0;
if (npages == 0)
@@ -415,10 +419,10 @@ int nfs_scan_list(struct nfs_inode *nfsi,
idx_start = req->wb_index + 1;
if (nfs_set_page_tag_locked(req)) {
kref_get(&req->wb_kref);
- nfs_list_remove_request(req);
radix_tree_tag_clear(&nfsi->nfs_page_tree,
req->wb_index, tag);
- nfs_list_add_request(req, dst);
+ list = pnfs_choose_commit_list(req, dst);
+ nfs_list_add_request(req, list);
res++;
if (res == INT_MAX)
goto out;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index f38813a..d9ab972 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -259,6 +259,7 @@ put_lseg(struct pnfs_layout_segment *lseg)
pnfs_free_lseg_list(&free_me);
}
}
+EXPORT_SYMBOL_GPL(put_lseg);
static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
@@ -471,6 +472,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
struct pnfs_layout_segment *lseg = NULL;
+ struct page **pages = NULL;
+ int i;
+ u32 max_resp_sz, max_pages;
dprintk("--> %s\n", __func__);
@@ -478,6 +482,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
if (lgp == NULL)
return NULL;
+
+ /* allocate pages for xdr post processing */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = max_resp_sz >> PAGE_SHIFT;
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_err_free;
+
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_err_free;
+ }
+
lgp->args.minlength = NFS4_MAX_UINT64;
lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
lgp->args.range.iomode = iomode;
@@ -486,6 +505,8 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
+ lgp->args.layout.pages = pages;
+ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
lgp->lsegpp = &lseg;
/* Synchronously retrieve layout information from server and
@@ -496,7 +517,26 @@ send_layoutget(struct pnfs_layout_hdr *lo,
/* remember that LAYOUTGET failed and suspend trying */
set_bit(lo_fail_bit(iomode), &lo->plh_flags);
}
+
+ /* free xdr pages */
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+
return lseg;
+
+out_err_free:
+ /* free any allocated xdr pages, and lgp since it was never used */
+ if (pages) {
+ for (i = 0; i < max_pages; i++) {
+ if (!pages[i])
+ break;
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+ }
+ kfree(lgp);
+ return NULL;
}
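The page array sized from the session's max_resp_sz follows the usual all-or-nothing allocation pattern. A stand-alone sketch of the same shape (helper name hypothetical):

    static struct page **alloc_xdr_pages(unsigned int npages)
    {
            struct page **pages;
            unsigned int i;

            pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < npages; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto out_free;
            }
            return pages;
    out_free:
            while (i--)                  /* free only what was allocated */
                    __free_page(pages[i]);
            kfree(pages);
            return NULL;
    }
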
bool pnfs_roc(struct inode *ino)
@@ -945,3 +985,105 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
+
+/*
+ * Currently there is only one (whole file) write lseg.
+ */
+static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
+{
+ struct pnfs_layout_segment *lseg, *rv = NULL;
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ rv = lseg;
+ return rv;
+}
+
+void
+pnfs_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ struct nfs_inode *nfsi = NFS_I(wdata->inode);
+ loff_t end_pos = wdata->args.offset + wdata->res.count;
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ /* references matched in nfs4_layoutcommit_release */
+ get_lseg(wdata->lseg);
+ wdata->lseg->pls_lc_cred =
+ get_rpccred(wdata->args.context->state->owner->so_cred);
+ mark_inode_dirty_sync(wdata->inode);
+ dprintk("%s: Set layoutcommit for inode %lu ",
+ __func__, wdata->inode->i_ino);
+ }
+ if (end_pos > wdata->lseg->pls_end_pos)
+ wdata->lseg->pls_end_pos = end_pos;
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+}
+EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
+
+/*
+ * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
+ * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
+ * data to disk to allow the server to recover the data if it crashes.
+ * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
+ * is off, and a COMMIT is sent to a data server, or
+ * if WRITEs to a data server return NFS_DATA_SYNC.
+ */
+int
+pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ struct nfs4_layoutcommit_data *data;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct pnfs_layout_segment *lseg;
+ struct rpc_cred *cred;
+ loff_t end_pos;
+ int status = 0;
+
+ dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
+
+ if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+ return 0;
+
+ /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ mark_inode_dirty_sync(inode);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&inode->i_lock);
+ if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ spin_unlock(&inode->i_lock);
+ kfree(data);
+ goto out;
+ }
+ /*
+ * There is currently only one (whole file) write lseg; it was referenced
+ * in pnfs_set_layoutcommit and will be found here.
+ */
+ lseg = pnfs_list_write_lseg(inode);
+
+ end_pos = lseg->pls_end_pos;
+ cred = lseg->pls_lc_cred;
+ lseg->pls_end_pos = 0;
+ lseg->pls_lc_cred = NULL;
+
+ memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
+ sizeof(nfsi->layout->plh_stateid.data));
+ spin_unlock(&inode->i_lock);
+
+ data->args.inode = inode;
+ data->lseg = lseg;
+ data->cred = cred;
+ nfs_fattr_init(&data->fattr);
+ data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+ data->res.fattr = &data->fattr;
+ data->args.lastbytewritten = end_pos - 1;
+ data->res.server = NFS_SERVER(inode);
+
+ status = nfs4_proc_layoutcommit(data, sync);
+out:
+ dprintk("<-- %s status %d\n", __func__, status);
+ return status;
+}
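Condensing the comment before pnfs_layoutcommit_inode(), the files-layout trigger conditions are:

    /* LAYOUTCOMMIT is needed exactly when:
     *   - NFL4_UFLG_COMMIT_THRU_MDS is off and a COMMIT went to a data
     *     server, or
     *   - a WRITE to a data server returned NFS_DATA_SYNC;
     * both are recorded by pnfs_set_layoutcommit() setting the
     * NFS_INO_LAYOUTCOMMIT bit that this function tests and clears.
     */
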
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 6380b94..bc48272 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -43,6 +43,8 @@ struct pnfs_layout_segment {
atomic_t pls_refcount;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
+ struct rpc_cred *pls_lc_cred; /* LAYOUTCOMMIT credential */
+ loff_t pls_end_pos; /* LAYOUTCOMMIT write end */
};
enum pnfs_try_status {
@@ -74,6 +76,13 @@ struct pnfs_layoutdriver_type {
/* test for nfs page cache coalescing */
int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
+ /* Returns true if layoutdriver wants to divert this request to
+ * driver's commit routine.
+ */
+ bool (*mark_pnfs_commit)(struct pnfs_layout_segment *lseg);
+ struct list_head * (*choose_commit_list) (struct nfs_page *req);
+ int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+
/*
* Return PNFS_ATTEMPTED to indicate the layout code has attempted
* I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS
@@ -100,7 +109,6 @@ struct pnfs_device {
unsigned int layout_type;
unsigned int mincount;
struct page **pages;
- void *area;
unsigned int pgbase;
unsigned int pglen;
};
@@ -145,7 +153,8 @@ bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
-
+void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
+int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
static inline int lo_fail_bit(u32 iomode)
{
@@ -169,6 +178,51 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
return nfss->pnfs_curr_ld != NULL;
}
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+ if (lseg) {
+ struct pnfs_layoutdriver_type *ld;
+
+ ld = NFS_SERVER(req->wb_page->mapping->host)->pnfs_curr_ld;
+ if (ld->mark_pnfs_commit && ld->mark_pnfs_commit(lseg)) {
+ set_bit(PG_PNFS_COMMIT, &req->wb_flags);
+ req->wb_commit_lseg = get_lseg(lseg);
+ }
+ }
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+ return PNFS_NOT_ATTEMPTED;
+ return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+ struct list_head *rv;
+
+ if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags)) {
+ struct inode *inode = req->wb_commit_lseg->pls_layout->plh_inode;
+
+ set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
+ rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req);
+ /* matched by ref taken when PG_PNFS_COMMIT is set */
+ put_lseg(req->wb_commit_lseg);
+ } else
+ rv = mds;
+ return rv;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+ if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags))
+ put_lseg(req->wb_commit_lseg);
+}
+
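The three inline helpers above form a small reference-counting protocol around PG_PNFS_COMMIT, so that every set of the bit owns exactly one lseg reference:

    /* mark:   set_bit(PG_PNFS_COMMIT) + get_lseg()   pnfs_mark_request_commit()
     * choose: test_and_clear_bit()    + put_lseg()   pnfs_choose_commit_list()
     * cancel: test_and_clear_bit()    + put_lseg()   pnfs_clear_request_commit()
     */
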
#else /* CONFIG_NFS_V4_1 */
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
@@ -252,6 +306,31 @@ pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *ino)
pgio->pg_test = NULL;
}
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ return PNFS_NOT_ATTEMPTED;
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+ return mds;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+}
+
+static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b8ec170..ac40b85 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -177,7 +177,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 47a3ad6..e4cbc11 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -59,6 +59,7 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
}
return p;
}
+EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
void nfs_commit_free(struct nfs_write_data *p)
{
@@ -66,6 +67,7 @@ void nfs_commit_free(struct nfs_write_data *p)
kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
+EXPORT_SYMBOL_GPL(nfs_commit_free);
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
@@ -179,8 +181,8 @@ static int wb_priority(struct writeback_control *wbc)
if (wbc->for_reclaim)
return FLUSH_HIGHPRI | FLUSH_STABLE;
if (wbc->for_kupdate || wbc->for_background)
- return FLUSH_LOWPRI;
- return 0;
+ return FLUSH_LOWPRI | FLUSH_COND_STABLE;
+ return FLUSH_COND_STABLE;
}
/*
@@ -387,11 +389,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
spin_lock(&inode->i_lock);
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
BUG_ON(error);
- if (!nfsi->npages) {
- igrab(inode);
- if (nfs_have_delegation(inode, FMODE_WRITE))
- nfsi->change_attr++;
- }
+ if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
+ nfsi->change_attr++;
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
@@ -421,11 +420,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
clear_bit(PG_MAPPED, &req->wb_flags);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
nfsi->npages--;
- if (!nfsi->npages) {
- spin_unlock(&inode->i_lock);
- iput(inode);
- } else
- spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
nfs_release_request(req);
}
@@ -441,7 +436,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
* Add a request to the inode's commit list.
*/
static void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
@@ -453,6 +448,7 @@ nfs_mark_request_commit(struct nfs_page *req)
NFS_PAGE_TAG_COMMIT);
nfsi->ncommit++;
spin_unlock(&inode->i_lock);
+ pnfs_mark_request_commit(req, lseg);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -474,14 +470,18 @@ nfs_clear_request_commit(struct nfs_page *req)
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
- return data->verf.committed != NFS_FILE_SYNC;
+ if (data->verf.committed == NFS_DATA_SYNC)
+ return data->lseg == NULL;
+ else
+ return data->verf.committed != NFS_FILE_SYNC;
}
static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
{
if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- nfs_mark_request_commit(req);
+ nfs_mark_request_commit(req, data->lseg);
return 1;
}
if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
@@ -492,7 +492,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
}
#else
static inline void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
}
@@ -509,7 +509,8 @@ int nfs_write_need_commit(struct nfs_write_data *data)
}
static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
{
return 0;
}
@@ -541,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
if (!nfs_need_commit(nfsi))
return 0;
+ spin_lock(&inode->i_lock);
ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
if (ret > 0)
nfsi->ncommit -= ret;
+ spin_unlock(&inode->i_lock);
+
if (nfs_need_commit(NFS_I(inode)))
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
return ret;
}
#else
@@ -612,9 +617,11 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
}
if (nfs_clear_request_commit(req) &&
- radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
- req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+ radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+ req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) {
NFS_I(inode)->ncommit--;
+ pnfs_clear_request_commit(req);
+ }
/* Okay, the request matches. Update the region */
if (offset < req->wb_offset) {
@@ -762,11 +769,12 @@ int nfs_updatepage(struct file *file, struct page *page,
return status;
}
-static void nfs_writepage_release(struct nfs_page *req)
+static void nfs_writepage_release(struct nfs_page *req,
+ struct nfs_write_data *data)
{
struct page *page = req->wb_page;
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
nfs_inode_remove_request(req);
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
@@ -863,7 +871,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
data->args.stable = NFS_UNSTABLE;
- if (how & FLUSH_STABLE) {
+ if (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
data->args.stable = NFS_DATA_SYNC;
if (!nfs_need_commit(NFS_I(inode)))
data->args.stable = NFS_FILE_SYNC;
@@ -912,6 +920,12 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
nfs_list_remove_request(req);
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+ desc->pg_count > wsize))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
nbytes = desc->pg_count;
do {
size_t len = min(nbytes, wsize);
@@ -1002,6 +1016,10 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
if ((!lseg) && list_is_singular(&data->pages))
lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
/* Set up the argument struct */
ret = nfs_write_rpcsetup(req, data, &nfs_write_full_ops, desc->pg_count, 0, lseg, desc->pg_ioflags);
out:
@@ -1074,7 +1092,7 @@ static void nfs_writeback_release_partial(void *calldata)
out:
if (atomic_dec_and_test(&req->wb_complete))
- nfs_writepage_release(req);
+ nfs_writepage_release(req, data);
nfs_writedata_release(calldata);
}
@@ -1141,7 +1159,7 @@ static void nfs_writeback_release_full(void *calldata)
if (nfs_write_need_commit(data)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req);
+ nfs_mark_request_commit(req, data->lseg);
dprintk(" marked for commit\n");
goto next;
}
@@ -1251,57 +1269,82 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
+ int ret;
+
if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
return 1;
- if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
- NFS_INO_COMMIT, nfs_wait_bit_killable,
- TASK_KILLABLE))
- return 1;
- return 0;
+ if (!may_wait)
+ return 0;
+ ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ return (ret < 0) ? ret : 1;
}
-static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
smp_mb__after_clear_bit();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
+EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
-
-static void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
+ put_lseg(wdata->lseg);
put_nfs_open_context(wdata->args.context);
nfs_commit_free(wdata);
}
+EXPORT_SYMBOL_GPL(nfs_commitdata_release);
-/*
- * Set up the argument/result storage required for the RPC call.
- */
-static int nfs_commit_rpcsetup(struct list_head *head,
- struct nfs_write_data *data,
- int how)
+int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how)
{
- struct nfs_page *first = nfs_list_entry(head->next);
- struct inode *inode = first->wb_context->path.dentry->d_inode;
- int priority = flush_task_priority(how);
struct rpc_task *task;
+ int priority = flush_task_priority(how);
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = first->wb_context->cred,
+ .rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
- .rpc_client = NFS_CLIENT(inode),
+ .rpc_client = clnt,
.rpc_message = &msg,
- .callback_ops = &nfs_commit_ops,
+ .callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
.priority = priority,
};
+ /* Set up the initial task struct. */
+ NFS_PROTO(data->inode)->commit_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (how & FLUSH_SYNC)
+ rpc_wait_for_completion_task(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_commit);
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *first = nfs_list_entry(head->next);
+ struct inode *inode = first->wb_context->path.dentry->d_inode;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
@@ -1309,7 +1352,9 @@ static int nfs_commit_rpcsetup(struct list_head *head,
list_splice_init(head, &data->pages);
data->inode = inode;
- data->cred = msg.rpc_cred;
+ data->cred = first->wb_context->cred;
+ data->lseg = lseg; /* reference transferred */
+ data->mds_ops = &nfs_commit_ops;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
@@ -1320,20 +1365,25 @@ static int nfs_commit_rpcsetup(struct list_head *head,
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
+}
+EXPORT_SYMBOL_GPL(nfs_init_commit);
- /* Set up the initial task struct. */
- NFS_PROTO(inode)->commit_setup(data, &msg);
-
- dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *req;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
- if (how & FLUSH_SYNC)
- rpc_wait_for_completion_task(task);
- rpc_put_task(task);
- return 0;
+ while (!list_empty(page_list)) {
+ req = nfs_list_entry(page_list->next);
+ nfs_list_remove_request(req);
+ nfs_mark_request_commit(req, lseg);
+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ nfs_clear_page_tag_locked(req);
+ }
}
+EXPORT_SYMBOL_GPL(nfs_retry_commit);
/*
* Commit dirty pages
@@ -1342,7 +1392,6 @@ static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
struct nfs_write_data *data;
- struct nfs_page *req;
data = nfs_commitdata_alloc();
@@ -1350,17 +1399,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
goto out_bad;
/* Set up the argument struct */
- return nfs_commit_rpcsetup(head, data, how);
+ nfs_init_commit(data, head, NULL);
+ return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
out_bad:
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_mark_request_commit(req);
- dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- nfs_clear_page_tag_locked(req);
- }
+ nfs_retry_commit(head, NULL);
nfs_commit_clear_lock(NFS_I(inode));
return -ENOMEM;
}
@@ -1380,10 +1422,9 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
return;
}
-static void nfs_commit_release(void *calldata)
+void nfs_commit_release_pages(struct nfs_write_data *data)
{
- struct nfs_write_data *data = calldata;
- struct nfs_page *req;
+ struct nfs_page *req;
int status = data->task.tk_status;
while (!list_empty(&data->pages)) {
@@ -1417,6 +1458,14 @@ static void nfs_commit_release(void *calldata)
next:
nfs_clear_page_tag_locked(req);
}
+}
+EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
+
+static void nfs_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ nfs_commit_release_pages(data);
nfs_commit_clear_lock(NFS_I(data->inode));
nfs_commitdata_release(calldata);
}
@@ -1433,23 +1482,28 @@ int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
int may_wait = how & FLUSH_SYNC;
- int res = 0;
+ int res;
- if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
+ res = nfs_commit_set_lock(NFS_I(inode), may_wait);
+ if (res <= 0)
goto out_mark_dirty;
- spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
- spin_unlock(&inode->i_lock);
if (res) {
- int error = nfs_commit_list(inode, &head, how);
+ int error;
+
+ error = pnfs_commit_list(inode, &head, how);
+ if (error == PNFS_NOT_ATTEMPTED)
+ error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
- if (may_wait)
- wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
- nfs_wait_bit_killable,
- TASK_KILLABLE);
- else
+ if (!may_wait)
goto out_mark_dirty;
+ error = wait_on_bit(&NFS_I(inode)->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ if (error < 0)
+ return error;
} else
nfs_commit_clear_lock(NFS_I(inode));
return res;
@@ -1503,7 +1557,22 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return nfs_commit_unstable_pages(inode, wbc);
+ int ret;
+
+ ret = nfs_commit_unstable_pages(inode, wbc);
+ if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
+ int status;
+ bool sync = true;
+
+ if (wbc->sync_mode == WB_SYNC_NONE || wbc->nonblocking ||
+ wbc->for_background)
+ sync = false;
+
+ status = pnfs_layoutcommit_inode(inode, sync);
+ if (status < 0)
+ return status;
+ }
+ return ret;
}
/*
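The reworked nfs_commit_set_lock()/nfs_commit_inode() pair above adopts a
tri-state convention: a negative value means the killable wait was
interrupted, 0 means the lock was not taken on the non-blocking path, and
1 means the caller now holds the commit lock. A compilable user-space
model of that convention; wait_for_bit() is a stand-in for the kernel's
killable wait, not a real API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool locked;

static int wait_for_bit(bool interrupted)
{
	return interrupted ? -EINTR : 0;	/* pretend we slept, then woke */
}

static int commit_set_lock(bool may_wait, bool interrupted)
{
	int ret;

	if (!locked) {			/* models test_and_set_bit() */
		locked = true;
		return 1;
	}
	if (!may_wait)
		return 0;
	ret = wait_for_bit(interrupted);
	if (ret < 0)
		return ret;		/* killed while waiting */
	locked = true;
	return 1;
}

int main(void)
{
	printf("first taker: %d\n", commit_set_lock(false, false));	   /* 1 */
	printf("non-blocking retry: %d\n", commit_set_lock(false, false)); /* 0 */
	printf("killable wait: %d\n", commit_set_lock(true, true));  /* -EINTR */
	return 0;
}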
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index 84c27d6..6940439 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -117,7 +117,6 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
* invoked in contexts where a memory allocation failure is
* fatal. Fortunately this fake ACL is small enough to
* construct on the stack. */
- memset(acl2, 0, sizeof(acl2));
posix_acl_init(acl2, 4);
/* Insert entries in canonical order: other orders seem
@@ -174,7 +173,7 @@ xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
return -EINVAL;
break;
case ACL_MASK:
- /* Solaris sometimes sets additonal bits in the mask */
+ /* Solaris sometimes sets additional bits in the mask */
entry->e_perm &= S_IRWXO;
break;
default:
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8b31e5f..ad000ae 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -299,7 +299,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
#define EXPORT_HASHBITS 8
#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
-#define EXPORT_HASHMASK (EXPORT_HASHMAX -1)
static struct cache_head *export_table[EXPORT_HASHMAX];
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 0c6d816..7c831a2 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
exp_readlock();
nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
fh_put(&fh);
- rqstp->rq_client = NULL;
exp_readunlock();
/* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7e84a85..ad48fac 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -702,7 +702,7 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
*p++ = htonl(resp->eof);
*p++ = htonl(resp->count); /* xdr opaque count */
xdr_ressize_check(rqstp, p);
- /* now update rqstp->rq_res to reflect data aswell */
+ /* now update rqstp->rq_res to reflect data as well */
rqstp->rq_res.page_len = resp->count;
if (resp->count & 3) {
/* need to pad the tail */
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 6d2c397..55780a2 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -63,7 +63,6 @@ struct ent {
#define ENT_HASHBITS 8
#define ENT_HASHMAX (1 << ENT_HASHBITS)
-#define ENT_HASHMASK (ENT_HASHMAX - 1)
static void
ent_init(struct cache_head *cnew, struct cache_head *citm)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index db52546..5fcb139 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
- ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */
- ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */
+ ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
+ ALLOWED_AS_FIRST_OP = 1 << 2, /* ops required first in compound */
};
struct nfsd4_operation {
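The enum fix above is worth pausing on: 2 << 0 and 3 << 0 evaluate to 2
and 3, so ALLOWED_AS_FIRST_OP overlapped the other flags (3 == 1 | 2)
instead of occupying its own bit. A small standalone demonstration:

#include <stdio.h>

enum broken {
	B_WITHOUT_FH = 1 << 0,	/* 1 */
	B_ABSENT_FS  = 2 << 0,	/* 2 -- a single bit only by luck */
	B_FIRST_OP   = 3 << 0,	/* 3 -- sets BOTH lower bits */
};

enum fixed {
	F_WITHOUT_FH = 1 << 0,	/* 1 */
	F_ABSENT_FS  = 1 << 1,	/* 2 */
	F_FIRST_OP   = 1 << 2,	/* 4 */
};

int main(void)
{
	/* With the broken values, testing WITHOUT_FH on a FIRST_OP
	 * operation succeeds spuriously: */
	printf("broken: %d\n", (B_FIRST_OP & B_WITHOUT_FH) != 0);	/* 1 */
	printf("fixed:  %d\n", (F_FIRST_OP & F_WITHOUT_FH) != 0);	/* 0 */
	return 0;
}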
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7b566ec..4cf04e1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -148,7 +148,7 @@ static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
-#define FILE_HASH_MASK (FILE_HASH_SIZE - 1)
+
/* hash table for (open)nfs4_stateid */
#define STATEID_HASH_BITS 10
#define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
if (atomic_dec_and_test(&fp->fi_delegees)) {
vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
fp->fi_lease = NULL;
+ fput(fp->fi_deleg_file);
fp->fi_deleg_file = NULL;
}
}
@@ -316,64 +317,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;
-static void unhash_generic_stateid(struct nfs4_stateid *stp)
-{
- list_del(&stp->st_hash);
- list_del(&stp->st_perfile);
- list_del(&stp->st_perstateowner);
-}
-
-static void free_generic_stateid(struct nfs4_stateid *stp)
-{
- put_nfs4_file(stp->st_file);
- kmem_cache_free(stateid_slab, stp);
-}
-
-static void release_lock_stateid(struct nfs4_stateid *stp)
-{
- struct file *file;
-
- unhash_generic_stateid(stp);
- file = find_any_file(stp->st_file);
- if (file)
- locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
- free_generic_stateid(stp);
-}
-
-static void unhash_lockowner(struct nfs4_stateowner *sop)
-{
- struct nfs4_stateid *stp;
-
- list_del(&sop->so_idhash);
- list_del(&sop->so_strhash);
- list_del(&sop->so_perstateid);
- while (!list_empty(&sop->so_stateids)) {
- stp = list_first_entry(&sop->so_stateids,
- struct nfs4_stateid, st_perstateowner);
- release_lock_stateid(stp);
- }
-}
-
-static void release_lockowner(struct nfs4_stateowner *sop)
-{
- unhash_lockowner(sop);
- nfs4_put_stateowner(sop);
-}
-
-static void
-release_stateid_lockowners(struct nfs4_stateid *open_stp)
-{
- struct nfs4_stateowner *lock_sop;
-
- while (!list_empty(&open_stp->st_lockowners)) {
- lock_sop = list_entry(open_stp->st_lockowners.next,
- struct nfs4_stateowner, so_perstateid);
- /* list_del(&open_stp->st_lockowners); */
- BUG_ON(lock_sop->so_is_open_owner);
- release_lockowner(lock_sop);
- }
-}
-
/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
@@ -446,13 +389,74 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp)
return nfs4_access_to_omode(access);
}
-static void release_open_stateid(struct nfs4_stateid *stp)
+static void unhash_generic_stateid(struct nfs4_stateid *stp)
{
- int oflag = nfs4_access_bmap_to_omode(stp);
+ list_del(&stp->st_hash);
+ list_del(&stp->st_perfile);
+ list_del(&stp->st_perstateowner);
+}
+static void free_generic_stateid(struct nfs4_stateid *stp)
+{
+ int oflag;
+
+ if (stp->st_access_bmap) {
+ oflag = nfs4_access_bmap_to_omode(stp);
+ nfs4_file_put_access(stp->st_file, oflag);
+ }
+ put_nfs4_file(stp->st_file);
+ kmem_cache_free(stateid_slab, stp);
+}
+
+static void release_lock_stateid(struct nfs4_stateid *stp)
+{
+ struct file *file;
+
+ unhash_generic_stateid(stp);
+ file = find_any_file(stp->st_file);
+ if (file)
+ locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
+ free_generic_stateid(stp);
+}
+
+static void unhash_lockowner(struct nfs4_stateowner *sop)
+{
+ struct nfs4_stateid *stp;
+
+ list_del(&sop->so_idhash);
+ list_del(&sop->so_strhash);
+ list_del(&sop->so_perstateid);
+ while (!list_empty(&sop->so_stateids)) {
+ stp = list_first_entry(&sop->so_stateids,
+ struct nfs4_stateid, st_perstateowner);
+ release_lock_stateid(stp);
+ }
+}
+
+static void release_lockowner(struct nfs4_stateowner *sop)
+{
+ unhash_lockowner(sop);
+ nfs4_put_stateowner(sop);
+}
+
+static void
+release_stateid_lockowners(struct nfs4_stateid *open_stp)
+{
+ struct nfs4_stateowner *lock_sop;
+
+ while (!list_empty(&open_stp->st_lockowners)) {
+ lock_sop = list_entry(open_stp->st_lockowners.next,
+ struct nfs4_stateowner, so_perstateid);
+ /* list_del(&open_stp->st_lockowners); */
+ BUG_ON(lock_sop->so_is_open_owner);
+ release_lockowner(lock_sop);
+ }
+}
+
+static void release_open_stateid(struct nfs4_stateid *stp)
+{
unhash_generic_stateid(stp);
release_stateid_lockowners(stp);
- nfs4_file_put_access(stp->st_file, oflag);
free_generic_stateid(stp);
}
@@ -608,7 +612,8 @@ static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4
u32 maxrpc = nfsd_serv->sv_max_mesg;
new->maxreqs = numslots;
- new->maxresp_cached = slotsize + NFSD_MIN_HDR_SEQ_SZ;
+ new->maxresp_cached = min_t(u32, req->maxresp_cached,
+ slotsize + NFSD_MIN_HDR_SEQ_SZ);
new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
@@ -3054,7 +3059,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
if (ONE_STATEID(stateid) && (flags & RD_STATE))
return nfs_ok;
else if (locks_in_grace()) {
- /* Answer in remaining cases depends on existance of
+ /* Answer in remaining cases depends on existence of
* conflicting state; so we must wait out the grace period. */
return nfserr_grace;
} else if (flags & WR_STATE)
@@ -3674,7 +3679,7 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
/*
* Alloc a lock owner structure.
* Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
- * occured.
+ * occurred.
*
* strhashval = lock_ownerstr_hashval
*/
@@ -3735,6 +3740,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
+ stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
@@ -3749,6 +3755,17 @@ check_lock_length(u64 offset, u64 length)
LOFF_OVERFLOW(offset, length)));
}
+static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
+{
+ struct nfs4_file *fp = lock_stp->st_file;
+ int oflag = nfs4_access_to_omode(access);
+
+ if (test_bit(access, &lock_stp->st_access_bmap))
+ return;
+ nfs4_file_get_access(fp, oflag);
+ __set_bit(access, &lock_stp->st_access_bmap);
+}
+
/*
* LOCK operation
*/
@@ -3765,7 +3782,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file_lock conflock;
__be32 status = 0;
unsigned int strhashval;
- unsigned int cmd;
int err;
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
@@ -3847,22 +3863,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
switch (lock->lk_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
- if (find_readable_file(lock_stp->st_file)) {
- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
- filp = find_readable_file(lock_stp->st_file);
- }
+ filp = find_readable_file(lock_stp->st_file);
+ if (filp)
+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
file_lock.fl_type = F_RDLCK;
- cmd = F_SETLK;
- break;
+ break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
- if (find_writeable_file(lock_stp->st_file)) {
- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
- filp = find_writeable_file(lock_stp->st_file);
- }
+ filp = find_writeable_file(lock_stp->st_file);
+ if (filp)
+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
file_lock.fl_type = F_WRLCK;
- cmd = F_SETLK;
- break;
+ break;
default:
status = nfserr_inval;
goto out;
@@ -3886,7 +3898,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Note: locks.c uses the BKL to protect the inode's lock list.
*/
- err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
+ err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
switch (-err) {
case 0: /* success! */
update_stateid(&lock_stp->st_stateid);
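get_lock_access() above makes taking a file access reference idempotent
per lock stateid: a bitmap records which modes are already held, so
free_generic_stateid() can later put back exactly what the bitmap says
was taken. A user-space model of the same bookkeeping, with illustrative
names:

#include <stdio.h>

#define ACCESS_READ	1
#define ACCESS_WRITE	2

struct file_refs { int rd, wr; };

struct lock_state {
	unsigned long access_bmap;	/* bits ACCESS_READ / ACCESS_WRITE */
	struct file_refs *fp;
};

static void get_lock_access(struct lock_state *stp, int access)
{
	if (stp->access_bmap & (1UL << access))
		return;			/* this mode is already held */
	if (access == ACCESS_READ)
		stp->fp->rd++;
	else
		stp->fp->wr++;
	stp->access_bmap |= 1UL << access;
}

int main(void)
{
	struct file_refs f = { 0, 0 };
	struct lock_state st = { 0, &f };

	get_lock_access(&st, ACCESS_READ);
	get_lock_access(&st, ACCESS_READ);	/* no second reference */
	get_lock_access(&st, ACCESS_WRITE);
	printf("rd=%d wr=%d bmap=%#lx\n", f.rd, f.wr, st.access_bmap);
	return 0;
}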
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 615f0a9..c6766af 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
u32 dummy;
char *machine_name;
- int i, j;
+ int i;
int nr_secflavs;
READ_BUF(16);
@@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
- for (j = 0; j < dummy; ++j)
- READ32(dummy);
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
@@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
- p += XDR_QUADLEN(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 33b3e2b..1f5eae4 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -12,13 +12,14 @@
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include "idmap.h"
#include "nfsd.h"
#include "cache.h"
/*
- * We have a single directory with 9 nodes in it.
+ * We have a single directory with several nodes in it.
*/
enum {
NFSD_Root = 1,
@@ -42,6 +43,7 @@ enum {
NFSD_Versions,
NFSD_Ports,
NFSD_MaxBlkSize,
+ NFSD_SupportedEnctypes,
/*
* The below MUST come last. Otherwise we leave a hole in nfsd_files[]
* with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
@@ -187,6 +189,34 @@ static struct file_operations export_features_operations = {
.release = single_release,
};
+#ifdef CONFIG_SUNRPC_GSS
+static int supported_enctypes_show(struct seq_file *m, void *v)
+{
+ struct gss_api_mech *k5mech;
+
+ k5mech = gss_mech_get_by_name("krb5");
+ if (k5mech == NULL)
+ goto out;
+ if (k5mech->gm_upcall_enctypes != NULL)
+ seq_printf(m, "%s", k5mech->gm_upcall_enctypes);
+ gss_mech_put(k5mech);
+out:
+ return 0;
+}
+
+static int supported_enctypes_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, supported_enctypes_show, NULL);
+}
+
+static struct file_operations supported_enctypes_ops = {
+ .open = supported_enctypes_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_SUNRPC_GSS */
+
extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1397,6 +1427,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
+#ifdef CONFIG_SUNRPC_GSS
+ [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
+#endif /* CONFIG_SUNRPC_GSS */
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
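The new supported_krb5_enctypes node is an ordinary read-only text file
in the nfsd filesystem, so user-space GSS tooling can read it like any
other file. A minimal reader, assuming the conventional /proc/fs/nfsd
mount point:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/fs/nfsd/supported_krb5_enctypes", "r");

	if (!f) {
		perror("open");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("krb5 enctypes: %s\n", buf);
	fclose(f);
	return 0;
}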
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 4ce005d..65ec595 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -451,7 +451,7 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
*p++ = htonl(resp->count);
xdr_ressize_check(rqstp, p);
- /* now update rqstp->rq_res to reflect data aswell */
+ /* now update rqstp->rq_res to reflect data as well */
rqstp->rq_res.page_len = resp->count;
if (resp->count & 3) {
/* need to pad the tail */
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 2d31224..6bd2f3c 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -367,16 +367,12 @@ struct nfs4_file {
struct list_head fi_delegations;
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
- /* One each for O_RDONLY, O_WRONLY: */
- atomic_t fi_access[2];
/*
- * Each open stateid contributes 1 to either fi_readers or
- * fi_writers, or both, depending on the open mode. A
- * delegation also takes an fi_readers reference. Lock
- * stateid's take none.
+ * Each open or lock stateid contributes 1 to either
+ * fi_access[O_RDONLY], fi_access[O_WRONLY], or both, depending
+ * on open or lock mode:
*/
- atomic_t fi_readers;
- atomic_t fi_writers;
+ atomic_t fi_access[2];
struct file *fi_deleg_file;
struct file_lock *fi_lease;
atomic_t fi_delegees;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ff93025..129f3c9 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
goto out;
@@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dchild))
goto out_nfserr;
+ /* If file doesn't exist, check for permissions to create one */
+ if (!dchild->d_inode) {
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+ }
+
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
@@ -1749,8 +1756,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (host_err)
goto out_drop_write;
}
- if (host_err)
- goto out_drop_write;
host_err = vfs_rename(fdir, odentry, tdir, ndentry);
if (!host_err) {
host_err = commit_metadata(tfhp);
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d7fd696..0a0a66d 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -521,8 +521,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
group_offset, bitmap))
printk(KERN_WARNING "%s: entry number %llu already freed\n",
__func__, (unsigned long long)req->pr_entry_nr);
-
- nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+ else
+ nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
kunmap(req->pr_bitmap_bh->b_page);
kunmap(req->pr_desc_bh->b_page);
@@ -558,8 +558,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
group_offset, bitmap))
printk(KERN_WARNING "%s: entry number %llu already freed\n",
__func__, (unsigned long long)req->pr_entry_nr);
-
- nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+ else
+ nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
kunmap(req->pr_bitmap_bh->b_page);
kunmap(req->pr_desc_bh->b_page);
@@ -665,7 +665,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
for (j = i, n = 0;
(j < nitems) && nilfs_palloc_group_is_in(inode, group,
entry_nrs[j]);
- j++, n++) {
+ j++) {
nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
if (!nilfs_clear_bit_atomic(
nilfs_mdt_bgl_lock(inode, group),
@@ -674,6 +674,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
"%s: entry number %llu already freed\n",
__func__,
(unsigned long long)entry_nrs[j]);
+ } else {
+ n++;
}
}
nilfs_palloc_group_desc_add_entries(inode, group, desc, n);
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 9af34a7..f5fde36 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -74,7 +74,7 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
#define nilfs_set_bit_atomic ext2_set_bit_atomic
#define nilfs_clear_bit_atomic ext2_clear_bit_atomic
-#define nilfs_find_next_zero_bit ext2_find_next_zero_bit
+#define nilfs_find_next_zero_bit find_next_zero_bit_le
/*
* persistent object allocator cache
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 3ee67c6..4723f04 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -25,7 +25,6 @@
#include <linux/errno.h>
#include "nilfs.h"
#include "bmap.h"
-#include "sb.h"
#include "btree.h"
#include "direct.h"
#include "btnode.h"
@@ -425,17 +424,6 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
/*
* Internal use only
*/
-
-void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
-{
- inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-}
-
-void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
-{
- inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-}
-
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
const struct buffer_head *bh)
{
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index bde1c0aa2..40d9f45 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -240,9 +240,6 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);
-void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
-void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
-
/* Assume that bmap semaphore is locked. */
static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 85f7baa..609cd22 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -34,15 +34,10 @@
#include "page.h"
#include "btnode.h"
-
-static const struct address_space_operations def_btnode_aops = {
- .sync_page = block_sync_page,
-};
-
void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi)
{
- nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
+ nilfs_mapping_init(btnc, bdi);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 300c2bc..d451ae0 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1174,7 +1174,7 @@ static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
if (ret < 0)
goto out;
nilfs_btree_commit_insert(btree, path, level, key, ptr);
- nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
@@ -1511,7 +1511,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
if (ret < 0)
goto out;
nilfs_btree_commit_delete(btree, path, level, dat);
- nilfs_bmap_sub_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
@@ -1776,7 +1776,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
return ret;
nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
di, ni, bh);
- nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
+ nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
return 0;
}
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 9d45773..3a19239 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -440,7 +440,6 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
}
/*
@@ -531,7 +530,6 @@ got_it:
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, page->mapping, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
@@ -579,7 +577,6 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-/* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
out:
nilfs_put_page(page);
return err;
@@ -684,7 +681,7 @@ const struct file_operations nilfs_dir_operations = {
.readdir = nilfs_readdir,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = nilfs_ioctl,
+ .compat_ioctl = nilfs_compat_ioctl,
#endif /* CONFIG_COMPAT */
.fsync = nilfs_sync_file,
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 324d80c..82f4865 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -146,7 +146,7 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
if (NILFS_BMAP_USE_VBN(bmap))
nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
- nilfs_bmap_add_blocks(bmap, 1);
+ nilfs_inode_add_blocks(bmap->b_inode, 1);
}
return ret;
}
@@ -168,7 +168,7 @@ static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
if (!ret) {
nilfs_bmap_commit_end_ptr(bmap, &req, dat);
nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
- nilfs_bmap_sub_blocks(bmap, 1);
+ nilfs_inode_sub_blocks(bmap->b_inode, 1);
}
return ret;
}
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 2f560c9..397e732 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -59,7 +59,7 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct nilfs_transaction_info ti;
int ret;
- if (unlikely(nilfs_near_disk_full(NILFS_SB(inode->i_sb)->s_nilfs)))
+ if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
return VM_FAULT_SIGBUS; /* -ENOSPC */
lock_page(page);
@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page)) {
- unlock_page(page);
+ if (PageMappedToDisk(page))
goto mapped;
- }
+
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
int fully_mapped = 1;
@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (fully_mapped) {
SetPageMappedToDisk(page);
- unlock_page(page);
goto mapped;
}
}
@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
- if (unlikely(ret)) {
+ if (ret != VM_FAULT_LOCKED) {
nilfs_transaction_abort(inode->i_sb);
return ret;
}
+ nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
nilfs_transaction_commit(inode->i_sb);
mapped:
SetPageChecked(page);
wait_on_page_writeback(page);
- return 0;
+ return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct nilfs_file_vm_ops = {
@@ -142,7 +141,7 @@ const struct file_operations nilfs_file_operations = {
.aio_write = generic_file_aio_write,
.unlocked_ioctl = nilfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = nilfs_ioctl,
+ .compat_ioctl = nilfs_compat_ioctl,
#endif /* CONFIG_COMPAT */
.mmap = nilfs_file_mmap,
.open = generic_file_open,
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index caf9a6a..1c2a3e2 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -49,7 +49,6 @@
#include "ifile.h"
static const struct address_space_operations def_gcinode_aops = {
- .sync_page = block_sync_page,
};
/*
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2fd440d..c0aa274 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -41,6 +41,24 @@ struct nilfs_iget_args {
int for_gc;
};
+void nilfs_inode_add_blocks(struct inode *inode, int n)
+{
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+ inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+ if (root)
+ atomic_add(n, &root->blocks_count);
+}
+
+void nilfs_inode_sub_blocks(struct inode *inode, int n)
+{
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+ inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+ if (root)
+ atomic_sub(n, &root->blocks_count);
+}
+
/**
* nilfs_get_block() - get a file block on the filesystem (callback function)
* @inode - inode struct of the target file
@@ -262,7 +280,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
- .sync_page = block_sync_page,
.writepages = nilfs_writepages,
.set_page_dirty = nilfs_set_page_dirty,
.readpages = nilfs_readpages,
@@ -277,7 +294,7 @@ const struct address_space_operations nilfs_aops = {
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
struct super_block *sb = dir->i_sb;
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct inode *inode;
struct nilfs_inode_info *ii;
struct nilfs_root *root;
@@ -315,19 +332,16 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
/* No lock is needed; iget() ensures it. */
}
- ii->i_flags = NILFS_I(dir)->i_flags;
- if (S_ISLNK(mode))
- ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
- if (!S_ISDIR(mode))
- ii->i_flags &= ~NILFS_DIRSYNC_FL;
+ ii->i_flags = nilfs_mask_flags(
+ mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
/* ii->i_file_acl = 0; */
/* ii->i_dir_acl = 0; */
ii->i_dir_start_lookup = 0;
nilfs_set_inode_flags(inode);
- spin_lock(&sbi->s_next_gen_lock);
- inode->i_generation = sbi->s_next_generation++;
- spin_unlock(&sbi->s_next_gen_lock);
+ spin_lock(&nilfs->ns_next_gen_lock);
+ inode->i_generation = nilfs->ns_next_generation++;
+ spin_unlock(&nilfs->ns_next_gen_lock);
insert_inode_hash(inode);
err = nilfs_init_acl(inode, dir);
@@ -359,17 +373,15 @@ void nilfs_set_inode_flags(struct inode *inode)
inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
S_DIRSYNC);
- if (flags & NILFS_SYNC_FL)
+ if (flags & FS_SYNC_FL)
inode->i_flags |= S_SYNC;
- if (flags & NILFS_APPEND_FL)
+ if (flags & FS_APPEND_FL)
inode->i_flags |= S_APPEND;
- if (flags & NILFS_IMMUTABLE_FL)
+ if (flags & FS_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
-#ifndef NILFS_ATIME_DISABLE
- if (flags & NILFS_NOATIME_FL)
-#endif
+ if (flags & FS_NOATIME_FL)
inode->i_flags |= S_NOATIME;
- if (flags & NILFS_DIRSYNC_FL)
+ if (flags & FS_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
@@ -420,7 +432,7 @@ static int __nilfs_read_inode(struct super_block *sb,
struct nilfs_root *root, unsigned long ino,
struct inode *inode)
{
- struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct buffer_head *bh;
struct nilfs_inode *raw_inode;
int err;
@@ -707,6 +719,7 @@ void nilfs_evict_inode(struct inode *inode)
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
+ int ret;
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
@@ -725,8 +738,9 @@ void nilfs_evict_inode(struct inode *inode)
nilfs_mark_inode_dirty(inode);
end_writeback(inode);
- nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
- atomic_dec(&ii->i_root->inodes_count);
+ ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
+ if (!ret)
+ atomic_dec(&ii->i_root->inodes_count);
nilfs_clear_inode(inode);
@@ -792,18 +806,18 @@ int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
- struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_inode_info *ii = NILFS_I(inode);
int err;
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
if (ii->i_bh == NULL) {
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
inode->i_ino, pbh);
if (unlikely(err))
return err;
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
if (ii->i_bh == NULL)
ii->i_bh = *pbh;
else {
@@ -814,36 +828,36 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
*pbh = ii->i_bh;
get_bh(*pbh);
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
- struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
int ret = 0;
if (!list_empty(&ii->i_dirty)) {
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
test_bit(NILFS_I_BUSY, &ii->i_state);
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
}
return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
- struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
- atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);
+ atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
return 0;
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
!test_bit(NILFS_I_BUSY, &ii->i_state)) {
/* Because this routine may race with nilfs_dispose_list(),
@@ -851,18 +865,18 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
/* This will happen when somebody is freeing
this inode. */
- nilfs_warning(sbi->s_super, __func__,
+ nilfs_warning(inode->i_sb, __func__,
"cannot get inode (ino=%lu)\n",
inode->i_ino);
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
return -EINVAL; /* NILFS_I_DIRTY may remain for
freeing inode */
}
list_del(&ii->i_dirty);
- list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
+ list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
set_bit(NILFS_I_QUEUED, &ii->i_state);
}
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
return 0;
}
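The new nilfs_inode_add_blocks()/nilfs_inode_sub_blocks() helpers above
pair the per-inode byte count with an atomic per-root block counter, so
each checkpoint root can report its own usage; the root pointer may be
absent for special inodes, hence the NULL check. A user-space model of
that bookkeeping, with illustrative types:

#include <stdatomic.h>
#include <stdio.h>

struct root { atomic_int blocks_count; };

struct inode {
	unsigned int blkbits;	/* block size = 1 << blkbits */
	long long bytes;
	struct root *root;	/* may be NULL */
};

static void inode_add_blocks(struct inode *in, int n)
{
	in->bytes += (1LL << in->blkbits) * n;
	if (in->root)
		atomic_fetch_add(&in->root->blocks_count, n);
}

static void inode_sub_blocks(struct inode *in, int n)
{
	in->bytes -= (1LL << in->blkbits) * n;
	if (in->root)
		atomic_fetch_sub(&in->root->blocks_count, n);
}

int main(void)
{
	struct root r = { 0 };
	struct inode in = { .blkbits = 12, .bytes = 0, .root = &r };

	inode_add_blocks(&in, 3);
	inode_sub_blocks(&in, 1);
	printf("bytes=%lld root_blocks=%d\n",
	       in.bytes, atomic_load(&r.blocks_count));
	return 0;
}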
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 4967389..f2469ba 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,7 +26,9 @@
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
+#include <linux/compat.h> /* compat_ptr() */
#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
+#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
@@ -97,11 +99,74 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
return ret;
}
+static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp)
+{
+ unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE;
+
+ return put_user(flags, (int __user *)argp);
+}
+
+static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
+ void __user *argp)
+{
+ struct nilfs_transaction_info ti;
+ unsigned int flags, oldflags;
+ int ret;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(flags, (int __user *)argp))
+ return -EFAULT;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ flags = nilfs_mask_flags(inode->i_mode, flags);
+
+ mutex_lock(&inode->i_mutex);
+
+ oldflags = NILFS_I(inode)->i_flags;
+
+ /*
+ * The IMMUTABLE and APPEND_ONLY flags can only be changed with the
+ * relevant capability.
+ */
+ ret = -EPERM;
+ if (((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ goto out;
+
+ ret = nilfs_transaction_begin(inode->i_sb, &ti, 0);
+ if (ret)
+ goto out;
+
+ NILFS_I(inode)->i_flags = (oldflags & ~FS_FL_USER_MODIFIABLE) |
+ (flags & FS_FL_USER_MODIFIABLE);
+
+ nilfs_set_inode_flags(inode);
+ inode->i_ctime = CURRENT_TIME;
+ if (IS_SYNC(inode))
+ nilfs_set_transaction_flag(NILFS_TI_SYNC);
+
+ nilfs_mark_inode_dirty(inode);
+ ret = nilfs_transaction_commit(inode->i_sb);
+out:
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+}
+
+static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
+{
+ return put_user(inode->i_generation, (int __user *)argp);
+}
+
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
- struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
- struct inode *cpfile = nilfs->ns_cpfile;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_transaction_info ti;
struct nilfs_cpmode cpmode;
int ret;
@@ -121,7 +186,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
- cpfile, cpmode.cm_cno, cpmode.cm_mode);
+ nilfs->ns_cpfile, cpmode.cm_cno, cpmode.cm_mode);
if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
else
@@ -137,7 +202,7 @@ static int
nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
- struct inode *cpfile = NILFS_SB(inode->i_sb)->s_nilfs->ns_cpfile;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_transaction_info ti;
__u64 cno;
int ret;
@@ -154,7 +219,7 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
goto out;
nilfs_transaction_begin(inode->i_sb, &ti, 0);
- ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
+ ret = nilfs_cpfile_delete_checkpoint(nilfs->ns_cpfile, cno);
if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
else
@@ -180,7 +245,7 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
- struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_cpstat cpstat;
int ret;
@@ -211,7 +276,7 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
- struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_sustat sustat;
int ret;
@@ -267,7 +332,7 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
- struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_argv argv;
int ret;
@@ -336,7 +401,7 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
- struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct inode *inode;
struct nilfs_vdesc *vdesc;
struct buffer_head *bh, *n;
@@ -550,7 +615,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
ret = PTR_ERR(kbufs[4]);
goto out;
}
- nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ nilfs = inode->i_sb->s_fs_info;
for (n = 0; n < 4; n++) {
ret = -EINVAL;
@@ -623,7 +688,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
return ret;
if (argp != NULL) {
- nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ nilfs = inode->i_sb->s_fs_info;
down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno - 1;
up_read(&nilfs->ns_segctor_sem);
@@ -641,7 +706,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
void *, size_t, size_t))
{
- struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_argv argv;
int ret;
@@ -666,6 +731,12 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ return nilfs_ioctl_getflags(inode, argp);
+ case FS_IOC_SETFLAGS:
+ return nilfs_ioctl_setflags(inode, filp, argp);
+ case FS_IOC_GETVERSION:
+ return nilfs_ioctl_getversion(inode, argp);
case NILFS_IOCTL_CHANGE_CPMODE:
return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp);
case NILFS_IOCTL_DELETE_CHECKPOINT:
@@ -696,3 +767,23 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENOTTY;
}
}
+
+#ifdef CONFIG_COMPAT
+long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
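The compat shim above follows the usual pattern: translate each known
32-bit command constant to its native equivalent, forward the
pointer-converted argument, and return -ENOIOCTLCMD for anything else.
A self-contained sketch of that dispatch shape; the command values are
invented for the demo, and 515 is the kernel-internal value of
ENOIOCTLCMD:

#include <stdio.h>

#define ENOIOCTLCMD	515

#define IOC32_GETFLAGS	0x80046601u	/* hypothetical 32-bit encoding */
#define IOC_GETFLAGS	0x80086601u	/* hypothetical native encoding */

static long native_ioctl(unsigned int cmd, unsigned long arg)
{
	printf("native ioctl cmd=%#x arg=%#lx\n", cmd, arg);
	return 0;
}

static long compat_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case IOC32_GETFLAGS:
		cmd = IOC_GETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;	/* not ours; let the caller fail */
	}
	/* the kernel additionally runs arg through compat_ptr() here */
	return native_ioctl(cmd, arg);
}

int main(void)
{
	compat_ioctl(IOC32_GETFLAGS, 0x1000);
	printf("unknown -> %ld\n", compat_ioctl(0xdeadu, 0));
	return 0;
}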
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index a0babd2..a649b05 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = {
.writepage = nilfs_mdt_write_page,
- .sync_page = block_sync_page,
};
static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
-static const struct address_space_operations shadow_map_aops = {
- .sync_page = block_sync_page,
-};
-
/**
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_data, bdi);
address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
mi->mi_shadow = shadow;
return 0;
}
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index b13734b..ed68563 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -66,7 +66,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
static inline struct the_nilfs *NILFS_I_NILFS(struct inode *inode)
{
- return NILFS_SB(inode->i_sb)->s_nilfs;
+ return inode->i_sb->s_fs_info;
}
/* Default GFP flags using highmem */
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 161791d..546849b 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -482,7 +482,7 @@ static struct dentry *nilfs_get_dentry(struct super_block *sb, u64 cno,
if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO)
return ERR_PTR(-ESTALE);
- root = nilfs_lookup_root(NILFS_SB(sb)->s_nilfs, cno);
+ root = nilfs_lookup_root(sb->s_fs_info, cno);
if (!root)
return ERR_PTR(-ESTALE);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 777e8fd..a8dd344 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -30,7 +30,6 @@
#include <linux/blkdev.h>
#include <linux/nilfs2_fs.h>
#include "the_nilfs.h"
-#include "sb.h"
#include "bmap.h"
/*
@@ -115,19 +114,19 @@ enum {
* Macros to check inode numbers
*/
#define NILFS_MDT_INO_BITS \
- ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
- 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
- 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+ ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
+ 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
+ 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
#define NILFS_SYS_INO_BITS \
- ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+ ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
-#define NILFS_FIRST_INO(sb) (NILFS_SB(sb)->s_nilfs->ns_first_ino)
+#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+ ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+ ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
/**
* struct nilfs_transaction_info: context information for synchronization
@@ -212,6 +211,23 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
#define NILFS_ATIME_DISABLE
+/* Flags that should be inherited by new inodes from their parent. */
+#define NILFS_FL_INHERITED \
+ (FS_SECRM_FL | FS_UNRM_FL | FS_COMPR_FL | FS_SYNC_FL | \
+ FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL |\
+ FS_COMPRBLK_FL | FS_NOCOMP_FL | FS_NOTAIL_FL | FS_DIRSYNC_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & ~(FS_DIRSYNC_FL | FS_TOPDIR_FL);
+ else
+ return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
+}
+
/* dir.c */
extern int nilfs_add_link(struct dentry *, struct inode *);
extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
@@ -229,10 +245,13 @@ extern int nilfs_sync_file(struct file *, int);
/* ioctl.c */
long nilfs_ioctl(struct file *, unsigned int, unsigned long);
+long nilfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *,
void **);
/* inode.c */
+void nilfs_inode_add_blocks(struct inode *inode, int n);
+void nilfs_inode_sub_blocks(struct inode *inode, int n);
extern struct inode *nilfs_new_inode(struct inode *, int);
extern void nilfs_free_inode(struct inode *);
extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
@@ -266,7 +285,7 @@ extern void nilfs_destroy_inode(struct inode *);
extern void nilfs_error(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void nilfs_warning(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
+ __attribute__ ((format (printf, 3, 4)));
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
@@ -275,11 +294,11 @@ extern int nilfs_check_feature_compatibility(struct super_block *,
struct nilfs_super_block *);
extern void nilfs_set_log_cursor(struct nilfs_super_block *,
struct the_nilfs *);
-extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *,
- int flip);
-extern int nilfs_commit_super(struct nilfs_sb_info *, int);
-extern int nilfs_cleanup_super(struct nilfs_sb_info *);
-int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
+struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
+ int flip);
+int nilfs_commit_super(struct super_block *sb, int flag);
+int nilfs_cleanup_super(struct super_block *sb);
+int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
struct nilfs_root **root);
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a585b35..1168059 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -493,15 +493,14 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
}
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops)
+ struct backing_dev_info *bdi)
{
mapping->host = NULL;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = bdi;
- mapping->a_ops = aops;
+ mapping->a_ops = &empty_aops;
}
/*
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 2a00953..f06b79a 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -62,8 +62,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops);
+ struct backing_dev_info *bdi);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 3dfcd3b..ba4a645 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -425,7 +425,7 @@ void nilfs_dispose_segment_list(struct list_head *head)
}
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
+ struct super_block *sb,
struct nilfs_recovery_info *ri)
{
struct list_head *head = &ri->ri_used_segments;
@@ -501,7 +501,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
}
static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
+ struct super_block *sb,
struct nilfs_root *root,
struct list_head *head,
unsigned long *nr_salvaged_blocks)
@@ -514,7 +514,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
int err = 0, err2 = 0;
list_for_each_entry_safe(rb, n, head, list) {
- inode = nilfs_iget(sbi->s_super, root, rb->ino);
+ inode = nilfs_iget(sb, root, rb->ino);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
inode = NULL;
@@ -572,11 +572,11 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
* nilfs_do_roll_forward - salvage logical segments newer than the latest
* checkpoint
* @nilfs: nilfs object
- * @sbi: nilfs_sb_info
+ * @sb: super block instance
* @ri: pointer to a nilfs_recovery_info
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
+ struct super_block *sb,
struct nilfs_root *root,
struct nilfs_recovery_info *ri)
{
@@ -648,7 +648,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
goto failed;
if (flags & NILFS_SS_LOGEND) {
err = nilfs_recover_dsync_blocks(
- nilfs, sbi, root, &dsync_blocks,
+ nilfs, sb, root, &dsync_blocks,
&nsalvaged_blocks);
if (unlikely(err))
goto failed;
@@ -681,7 +681,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
if (nsalvaged_blocks) {
printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
- sbi->s_super->s_id, nsalvaged_blocks);
+ sb->s_id, nsalvaged_blocks);
ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
}
out:
@@ -695,7 +695,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
printk(KERN_ERR
"NILFS (device %s): Error roll-forwarding "
"(err=%d, pseg block=%llu). ",
- sbi->s_super->s_id, err, (unsigned long long)pseg_start);
+ sb->s_id, err, (unsigned long long)pseg_start);
goto out;
}
@@ -724,7 +724,7 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
/**
* nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
* @nilfs: nilfs object
- * @sbi: nilfs_sb_info
+ * @sb: super block instance
* @ri: pointer to a nilfs_recovery_info struct to store search results.
*
* Return Value: On success, 0 is returned. On error, one of the following
@@ -741,7 +741,7 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
* %-ENOMEM - Insufficient memory available.
*/
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
+ struct super_block *sb,
struct nilfs_recovery_info *ri)
{
struct nilfs_root *root;
@@ -750,32 +750,32 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
return 0;
- err = nilfs_attach_checkpoint(sbi, ri->ri_cno, true, &root);
+ err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
if (unlikely(err)) {
printk(KERN_ERR
"NILFS: error loading the latest checkpoint.\n");
return err;
}
- err = nilfs_do_roll_forward(nilfs, sbi, root, ri);
+ err = nilfs_do_roll_forward(nilfs, sb, root, ri);
if (unlikely(err))
goto failed;
if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
- err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri);
+ err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: Error preparing segments for "
"recovery.\n");
goto failed;
}
- err = nilfs_attach_segment_constructor(sbi, root);
+ err = nilfs_attach_log_writer(sb, root);
if (unlikely(err))
goto failed;
set_nilfs_discontinued(nilfs);
- err = nilfs_construct_segment(sbi->s_super);
- nilfs_detach_segment_constructor(sbi);
+ err = nilfs_construct_segment(sb);
+ nilfs_detach_log_writer(sb);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: Oops! recovery failed. "
diff --git a/fs/nilfs2/sb.h b/fs/nilfs2/sb.h
deleted file mode 100644
index 7a17715..0000000
--- a/fs/nilfs2/sb.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * sb.h - NILFS on-memory super block structure.
- *
- * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
- *
- */
-
-#ifndef _NILFS_SB
-#define _NILFS_SB
-
-#include <linux/types.h>
-#include <linux/fs.h>
-
-struct the_nilfs;
-struct nilfs_sc_info;
-
-/*
- * NILFS super-block data in memory
- */
-struct nilfs_sb_info {
- /* Mount options */
- unsigned long s_mount_opt;
- uid_t s_resuid;
- gid_t s_resgid;
-
- unsigned long s_interval; /* construction interval */
- unsigned long s_watermark; /* threshold of data amount
- for the segment construction */
-
- /* Fundamental members */
- struct super_block *s_super; /* reverse pointer to super_block */
- struct the_nilfs *s_nilfs;
-
- /* Segment constructor */
- struct list_head s_dirty_files; /* dirty files list */
- struct nilfs_sc_info *s_sc_info; /* segment constructor info */
- spinlock_t s_inode_lock; /* Lock for the nilfs inode.
- It covers s_dirty_files list */
-
- /* Inode allocator */
- spinlock_t s_next_gen_lock;
- u32 s_next_generation;
-};
-
-static inline struct nilfs_sb_info *NILFS_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct nilfs_sc_info *NILFS_SC(struct nilfs_sb_info *sbi)
-{
- return sbi->s_sc_info;
-}
-
-/*
- * Bit operations for the mount option
- */
-#define nilfs_clear_opt(sbi, opt) \
- do { (sbi)->s_mount_opt &= ~NILFS_MOUNT_##opt; } while (0)
-#define nilfs_set_opt(sbi, opt) \
- do { (sbi)->s_mount_opt |= NILFS_MOUNT_##opt; } while (0)
-#define nilfs_test_opt(sbi, opt) ((sbi)->s_mount_opt & NILFS_MOUNT_##opt)
-#define nilfs_write_opt(sbi, mask, opt) \
- do { (sbi)->s_mount_opt = \
- (((sbi)->s_mount_opt & ~NILFS_MOUNT_##mask) | \
- NILFS_MOUNT_##opt); \
- } while (0)
-
-#endif /* _NILFS_SB */
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 0f83e93..2853ff2 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following
* submission.
*/
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2de9f63..afe4f21 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -104,8 +104,7 @@ struct nilfs_sc_operations {
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
-static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
- int);
+static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
#define nilfs_cnt32_gt(a, b) \
(typecheck(__u32, a) && typecheck(__u32, b) && \
@@ -182,7 +181,6 @@ int nilfs_transaction_begin(struct super_block *sb,
struct nilfs_transaction_info *ti,
int vacancy_check)
{
- struct nilfs_sb_info *sbi;
struct the_nilfs *nilfs;
int ret = nilfs_prepare_segment_lock(ti);
@@ -193,8 +191,7 @@ int nilfs_transaction_begin(struct super_block *sb,
vfs_check_frozen(sb, SB_FREEZE_WRITE);
- sbi = NILFS_SB(sb);
- nilfs = sbi->s_nilfs;
+ nilfs = sb->s_fs_info;
down_read(&nilfs->ns_segctor_sem);
if (vacancy_check && nilfs_near_disk_full(nilfs)) {
up_read(&nilfs->ns_segctor_sem);
@@ -225,8 +222,7 @@ int nilfs_transaction_begin(struct super_block *sb,
int nilfs_transaction_commit(struct super_block *sb)
{
struct nilfs_transaction_info *ti = current->journal_info;
- struct nilfs_sb_info *sbi;
- struct nilfs_sc_info *sci;
+ struct the_nilfs *nilfs = sb->s_fs_info;
int err = 0;
BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
@@ -235,16 +231,15 @@ int nilfs_transaction_commit(struct super_block *sb)
ti->ti_count--;
return 0;
}
- sbi = NILFS_SB(sb);
- sci = NILFS_SC(sbi);
- if (sci != NULL) {
+ if (nilfs->ns_writer) {
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
+
if (ti->ti_flags & NILFS_TI_COMMIT)
nilfs_segctor_start_timer(sci);
- if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
- sci->sc_watermark)
+ if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
nilfs_segctor_do_flush(sci, 0);
}
- up_read(&sbi->s_nilfs->ns_segctor_sem);
+ up_read(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
if (ti->ti_flags & NILFS_TI_SYNC)
@@ -257,13 +252,14 @@ int nilfs_transaction_commit(struct super_block *sb)
void nilfs_transaction_abort(struct super_block *sb)
{
struct nilfs_transaction_info *ti = current->journal_info;
+ struct the_nilfs *nilfs = sb->s_fs_info;
BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
if (ti->ti_count > 0) {
ti->ti_count--;
return;
}
- up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);
+ up_read(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
@@ -272,9 +268,8 @@ void nilfs_transaction_abort(struct super_block *sb)
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_sc_info *sci = NILFS_SC(sbi);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
if (!sci || !sci->sc_flush_request)
return;
@@ -294,11 +289,13 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb)
downgrade_write(&nilfs->ns_segctor_sem);
}
-static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
+static void nilfs_transaction_lock(struct super_block *sb,
struct nilfs_transaction_info *ti,
int gcflag)
{
struct nilfs_transaction_info *cur_ti = current->journal_info;
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
WARN_ON(cur_ti);
ti->ti_flags = NILFS_TI_WRITER;
@@ -309,30 +306,31 @@ static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
current->journal_info = ti;
for (;;) {
- down_write(&sbi->s_nilfs->ns_segctor_sem);
- if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
+ down_write(&nilfs->ns_segctor_sem);
+ if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
break;
- nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));
+ nilfs_segctor_do_immediate_flush(sci);
- up_write(&sbi->s_nilfs->ns_segctor_sem);
+ up_write(&nilfs->ns_segctor_sem);
yield();
}
if (gcflag)
ti->ti_flags |= NILFS_TI_GC;
}
-static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
+static void nilfs_transaction_unlock(struct super_block *sb)
{
struct nilfs_transaction_info *ti = current->journal_info;
+ struct the_nilfs *nilfs = sb->s_fs_info;
BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
BUG_ON(ti->ti_count > 0);
- up_write(&sbi->s_nilfs->ns_segctor_sem);
+ up_write(&nilfs->ns_segctor_sem);
current->journal_info = ti->ti_save;
if (!list_empty(&ti->ti_garbage))
- nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
+ nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -714,7 +712,7 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
}
}
-static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
+static void nilfs_dispose_list(struct the_nilfs *nilfs,
struct list_head *head, int force)
{
struct nilfs_inode_info *ii, *n;
@@ -722,7 +720,7 @@ static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
unsigned nv = 0;
while (!list_empty(head)) {
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
list_for_each_entry_safe(ii, n, head, i_dirty) {
list_del_init(&ii->i_dirty);
if (force) {
@@ -733,14 +731,14 @@ static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
set_bit(NILFS_I_QUEUED, &ii->i_state);
list_add_tail(&ii->i_dirty,
- &sbi->s_dirty_files);
+ &nilfs->ns_dirty_files);
continue;
}
ivec[nv++] = ii;
if (nv == SC_N_INODEVEC)
break;
}
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
for (pii = ivec; nv > 0; pii++, nv--)
iput(&(*pii)->vfs_inode);
@@ -773,24 +771,23 @@ static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int ret = 0;
- if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
+ if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
- spin_lock(&sbi->s_inode_lock);
- if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
+ spin_lock(&nilfs->ns_inode_lock);
+ if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
ret++;
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
return ret;
}
static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
nilfs_mdt_clear_dirty(sci->sc_root->ifile);
nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
@@ -800,7 +797,7 @@ static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
- struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct buffer_head *bh_cp;
struct nilfs_checkpoint *raw_cp;
int err;
@@ -824,8 +821,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct buffer_head *bh_cp;
struct nilfs_checkpoint *raw_cp;
int err;
@@ -1049,8 +1045,7 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct list_head *head;
struct nilfs_inode_info *ii;
size_t ndone;
@@ -1859,7 +1854,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
- struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int update_sr = false;
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -1963,30 +1958,30 @@ static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
return ret;
}
-static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
- struct nilfs_sb_info *sbi)
+static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
+ struct the_nilfs *nilfs)
{
struct nilfs_inode_info *ii, *n;
struct inode *ifile = sci->sc_root->ifile;
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
retry:
- list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
+ list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
if (!ii->i_bh) {
struct buffer_head *ibh;
int err;
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
err = nilfs_ifile_get_inode_block(
ifile, ii->vfs_inode.i_ino, &ibh);
if (unlikely(err)) {
- nilfs_warning(sbi->s_super, __func__,
+ nilfs_warning(sci->sc_super, __func__,
"failed to get inode block.\n");
return err;
}
nilfs_mdt_mark_buffer_dirty(ibh);
nilfs_mdt_mark_dirty(ifile);
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
if (likely(!ii->i_bh))
ii->i_bh = ibh;
else
@@ -1999,18 +1994,18 @@ static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
list_del(&ii->i_dirty);
list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
}
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
return 0;
}
-static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
- struct nilfs_sb_info *sbi)
+static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ struct the_nilfs *nilfs)
{
struct nilfs_transaction_info *ti = current->journal_info;
struct nilfs_inode_info *ii, *n;
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
test_bit(NILFS_I_DIRTY, &ii->i_state))
@@ -2022,7 +2017,7 @@ static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
list_del(&ii->i_dirty);
list_add_tail(&ii->i_dirty, &ti->ti_garbage);
}
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
}
/*
@@ -2030,15 +2025,14 @@ static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
*/
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct page *failed_page;
int err;
sci->sc_stage.scnt = NILFS_ST_INIT;
sci->sc_cno = nilfs->ns_cno;
- err = nilfs_segctor_check_in_files(sci, sbi);
+ err = nilfs_segctor_collect_dirty_files(sci, nilfs);
if (unlikely(err))
goto out;
@@ -2116,7 +2110,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
} while (sci->sc_stage.scnt != NILFS_ST_DONE);
out:
- nilfs_segctor_check_out_files(sci, sbi);
+ nilfs_segctor_drop_written_files(sci, nilfs);
return err;
failed_to_write:
@@ -2169,8 +2163,8 @@ static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
*/
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_sc_info *sci = NILFS_SC(sbi);
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
if (!sci || nilfs_doing_construction())
return;
@@ -2259,8 +2253,8 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
*/
int nilfs_construct_segment(struct super_block *sb)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_sc_info *sci = NILFS_SC(sbi);
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
struct nilfs_transaction_info *ti;
int err;
@@ -2297,8 +2291,8 @@ int nilfs_construct_segment(struct super_block *sb)
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
loff_t start, loff_t end)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_sc_info *sci = NILFS_SC(sbi);
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
struct nilfs_inode_info *ii;
struct nilfs_transaction_info ti;
int err = 0;
@@ -2306,33 +2300,33 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
if (!sci)
return -EROFS;
- nilfs_transaction_lock(sbi, &ti, 0);
+ nilfs_transaction_lock(sb, &ti, 0);
ii = NILFS_I(inode);
if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
- nilfs_test_opt(sbi, STRICT_ORDER) ||
+ nilfs_test_opt(nilfs, STRICT_ORDER) ||
test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
- nilfs_discontinued(sbi->s_nilfs)) {
- nilfs_transaction_unlock(sbi);
+ nilfs_discontinued(nilfs)) {
+ nilfs_transaction_unlock(sb);
err = nilfs_segctor_sync(sci);
return err;
}
- spin_lock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
!test_bit(NILFS_I_BUSY, &ii->i_state)) {
- spin_unlock(&sbi->s_inode_lock);
- nilfs_transaction_unlock(sbi);
+ spin_unlock(&nilfs->ns_inode_lock);
+ nilfs_transaction_unlock(sb);
return 0;
}
- spin_unlock(&sbi->s_inode_lock);
+ spin_unlock(&nilfs->ns_inode_lock);
sci->sc_dsync_inode = ii;
sci->sc_dsync_start = start;
sci->sc_dsync_end = end;
err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
- nilfs_transaction_unlock(sbi);
+ nilfs_transaction_unlock(sb);
return err;
}
@@ -2388,8 +2382,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
*/
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct nilfs_super_block **sbp;
int err = 0;
@@ -2407,11 +2400,12 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
err = -EIO;
- sbp = nilfs_prepare_super(sbi,
+ sbp = nilfs_prepare_super(sci->sc_super,
nilfs_sb_will_flip(nilfs));
if (likely(sbp)) {
nilfs_set_log_cursor(sbp[0], nilfs);
- err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
+ err = nilfs_commit_super(sci->sc_super,
+ NILFS_SB_COMMIT);
}
up_write(&nilfs->ns_sem);
}
@@ -2443,16 +2437,15 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
void **kbufs)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_sc_info *sci = NILFS_SC(sbi);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci = nilfs->ns_writer;
struct nilfs_transaction_info ti;
int err;
if (unlikely(!sci))
return -EROFS;
- nilfs_transaction_lock(sbi, &ti, 1);
+ nilfs_transaction_lock(sb, &ti, 1);
err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
if (unlikely(err))
@@ -2480,14 +2473,14 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
- if (nilfs_test_opt(sbi, DISCARD)) {
+ if (nilfs_test_opt(nilfs, DISCARD)) {
int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
sci->sc_nfreesegs);
if (ret) {
printk(KERN_WARNING
"NILFS warning: error %d on discard request, "
"turning discards off for the device\n", ret);
- nilfs_clear_opt(sbi, DISCARD);
+ nilfs_clear_opt(nilfs, DISCARD);
}
}
@@ -2495,16 +2488,15 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
sci->sc_freesegs = NULL;
sci->sc_nfreesegs = 0;
nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
- nilfs_transaction_unlock(sbi);
+ nilfs_transaction_unlock(sb);
return err;
}
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- nilfs_transaction_lock(sbi, &ti, 0);
+ nilfs_transaction_lock(sci->sc_super, &ti, 0);
nilfs_segctor_construct(sci, mode);
/*
@@ -2515,7 +2507,7 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
nilfs_segctor_start_timer(sci);
- nilfs_transaction_unlock(sbi);
+ nilfs_transaction_unlock(sci->sc_super);
}
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
@@ -2561,7 +2553,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
- struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int timeout = 0;
sci->sc_timer.data = (unsigned long)current;
@@ -2672,17 +2664,17 @@ static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
/*
* Setup & clean-up functions
*/
-static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
+static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
struct nilfs_root *root)
{
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_sc_info *sci;
sci = kzalloc(sizeof(*sci), GFP_KERNEL);
if (!sci)
return NULL;
- sci->sc_sbi = sbi;
- sci->sc_super = sbi->s_super;
+ sci->sc_super = sb;
nilfs_get_root(root);
sci->sc_root = root;
@@ -2702,10 +2694,10 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
- if (sbi->s_interval)
- sci->sc_interval = sbi->s_interval;
- if (sbi->s_watermark)
- sci->sc_watermark = sbi->s_watermark;
+ if (nilfs->ns_interval)
+ sci->sc_interval = nilfs->ns_interval;
+ if (nilfs->ns_watermark)
+ sci->sc_watermark = nilfs->ns_watermark;
return sci;
}
@@ -2716,12 +2708,11 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
/* The segctord thread was stopped and its timer was removed.
But some tasks remain. */
do {
- struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- nilfs_transaction_lock(sbi, &ti, 0);
+ nilfs_transaction_lock(sci->sc_super, &ti, 0);
ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
- nilfs_transaction_unlock(sbi);
+ nilfs_transaction_unlock(sci->sc_super);
} while (ret && retrycount-- > 0);
}
@@ -2736,10 +2727,10 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
*/
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
- struct nilfs_sb_info *sbi = sci->sc_sbi;
+ struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int flag;
- up_write(&sbi->s_nilfs->ns_segctor_sem);
+ up_write(&nilfs->ns_segctor_sem);
spin_lock(&sci->sc_state_lock);
nilfs_segctor_kill_thread(sci);
@@ -2753,9 +2744,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
WARN_ON(!list_empty(&sci->sc_copied_buffers));
if (!list_empty(&sci->sc_dirty_files)) {
- nilfs_warning(sbi->s_super, __func__,
+ nilfs_warning(sci->sc_super, __func__,
"dirty file(s) after the final construction\n");
- nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
+ nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
WARN_ON(!list_empty(&sci->sc_segbufs));
@@ -2763,79 +2754,78 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
nilfs_put_root(sci->sc_root);
- down_write(&sbi->s_nilfs->ns_segctor_sem);
+ down_write(&nilfs->ns_segctor_sem);
del_timer_sync(&sci->sc_timer);
kfree(sci);
}
/**
- * nilfs_attach_segment_constructor - attach a segment constructor
- * @sbi: nilfs_sb_info
+ * nilfs_attach_log_writer - attach log writer
+ * @sb: super block instance
* @root: root object of the current filesystem tree
*
- * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
- * initializes it, and starts the segment constructor.
+ * This allocates a log writer object, initializes it, and starts the
+ * log writer.
*
* Return Value: On success, 0 is returned. On error, one of the following
* negative error codes is returned.
*
* %-ENOMEM - Insufficient memory available.
*/
-int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
- struct nilfs_root *root)
+int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
+ struct the_nilfs *nilfs = sb->s_fs_info;
int err;
- if (NILFS_SC(sbi)) {
+ if (nilfs->ns_writer) {
/*
* This happens if the filesystem was remounted
* read/write after nilfs_error degenerated it into a
* read-only mount.
*/
- nilfs_detach_segment_constructor(sbi);
+ nilfs_detach_log_writer(sb);
}
- sbi->s_sc_info = nilfs_segctor_new(sbi, root);
- if (!sbi->s_sc_info)
+ nilfs->ns_writer = nilfs_segctor_new(sb, root);
+ if (!nilfs->ns_writer)
return -ENOMEM;
- err = nilfs_segctor_start_thread(NILFS_SC(sbi));
+ err = nilfs_segctor_start_thread(nilfs->ns_writer);
if (err) {
- kfree(sbi->s_sc_info);
- sbi->s_sc_info = NULL;
+ kfree(nilfs->ns_writer);
+ nilfs->ns_writer = NULL;
}
return err;
}
/**
- * nilfs_detach_segment_constructor - destroy the segment constructor
- * @sbi: nilfs_sb_info
+ * nilfs_detach_log_writer - destroy log writer
+ * @sb: super block instance
*
- * nilfs_detach_segment_constructor() kills the segment constructor daemon,
- * frees the struct nilfs_sc_info, and destroy the dirty file list.
+ * This kills the log writer daemon, frees the log writer object, and
+ * destroys the list of dirty files.
*/
-void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
+void nilfs_detach_log_writer(struct super_block *sb)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
LIST_HEAD(garbage_list);
down_write(&nilfs->ns_segctor_sem);
- if (NILFS_SC(sbi)) {
- nilfs_segctor_destroy(NILFS_SC(sbi));
- sbi->s_sc_info = NULL;
+ if (nilfs->ns_writer) {
+ nilfs_segctor_destroy(nilfs->ns_writer);
+ nilfs->ns_writer = NULL;
}
/* Forcibly free the list of dirty files */
- spin_lock(&sbi->s_inode_lock);
- if (!list_empty(&sbi->s_dirty_files)) {
- list_splice_init(&sbi->s_dirty_files, &garbage_list);
- nilfs_warning(sbi->s_super, __func__,
- "Non empty dirty list after the last "
- "segment construction\n");
- }
- spin_unlock(&sbi->s_inode_lock);
+ spin_lock(&nilfs->ns_inode_lock);
+ if (!list_empty(&nilfs->ns_dirty_files)) {
+ list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
+ nilfs_warning(sb, __func__,
+ "Hit dirty file after stopped log writer\n");
+ }
+ spin_unlock(&nilfs->ns_inode_lock);
up_write(&nilfs->ns_segctor_sem);
- nilfs_dispose_list(sbi, &garbage_list, 1);
+ nilfs_dispose_list(nilfs, &garbage_list, 1);
}
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index cd8056e..6c02a86 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -27,7 +27,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
-#include "sb.h"
+#include "nilfs.h"
struct nilfs_root;
@@ -88,7 +88,6 @@ struct nilfs_segsum_pointer {
/**
* struct nilfs_sc_info - Segment constructor information
* @sc_super: Back pointer to super_block struct
- * @sc_sbi: Back pointer to nilfs_sb_info struct
* @sc_root: root object of the current filesystem tree
* @sc_nblk_inc: Block count of current generation
* @sc_dirty_files: List of files to be written
@@ -131,7 +130,6 @@ struct nilfs_segsum_pointer {
*/
struct nilfs_sc_info {
struct super_block *sc_super;
- struct nilfs_sb_info *sc_sbi;
struct nilfs_root *sc_root;
unsigned long sc_nblk_inc;
@@ -235,18 +233,16 @@ extern void nilfs_flush_segment(struct super_block *, ino_t);
extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *,
void **);
-int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
- struct nilfs_root *root);
-extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *);
+int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root);
+void nilfs_detach_log_writer(struct super_block *sb);
/* recovery.c */
extern int nilfs_read_super_root_block(struct the_nilfs *, sector_t,
struct buffer_head **, int);
extern int nilfs_search_super_root(struct the_nilfs *,
struct nilfs_recovery_info *);
-extern int nilfs_salvage_orphan_logs(struct the_nilfs *,
- struct nilfs_sb_info *,
- struct nilfs_recovery_info *);
+int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, struct super_block *sb,
+ struct nilfs_recovery_info *ri);
extern void nilfs_dispose_segment_list(struct list_head *);
#endif /* _NILFS_SEGMENT_H */
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1673b3d..062cca0 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -43,7 +43,6 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
-#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
@@ -72,23 +71,23 @@ struct kmem_cache *nilfs_transaction_cachep;
struct kmem_cache *nilfs_segbuf_cachep;
struct kmem_cache *nilfs_btree_path_cache;
-static int nilfs_setup_super(struct nilfs_sb_info *sbi, int is_mount);
+static int nilfs_setup_super(struct super_block *sb, int is_mount);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
-static void nilfs_set_error(struct nilfs_sb_info *sbi)
+static void nilfs_set_error(struct super_block *sb)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp;
down_write(&nilfs->ns_sem);
if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
nilfs->ns_mount_state |= NILFS_ERROR_FS;
- sbp = nilfs_prepare_super(sbi, 0);
+ sbp = nilfs_prepare_super(sb, 0);
if (likely(sbp)) {
sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
if (sbp[1])
sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
- nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
+ nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}
}
up_write(&nilfs->ns_sem);
@@ -109,7 +108,7 @@ static void nilfs_set_error(struct nilfs_sb_info *sbi)
void nilfs_error(struct super_block *sb, const char *function,
const char *fmt, ...)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct va_format vaf;
va_list args;
@@ -124,15 +123,15 @@ void nilfs_error(struct super_block *sb, const char *function,
va_end(args);
if (!(sb->s_flags & MS_RDONLY)) {
- nilfs_set_error(sbi);
+ nilfs_set_error(sb);
- if (nilfs_test_opt(sbi, ERRORS_RO)) {
+ if (nilfs_test_opt(nilfs, ERRORS_RO)) {
printk(KERN_CRIT "Remounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
}
}
- if (nilfs_test_opt(sbi, ERRORS_PANIC))
+ if (nilfs_test_opt(nilfs, ERRORS_PANIC))
panic("NILFS (device %s): panic forced after error\n",
sb->s_id);
}
@@ -189,14 +188,14 @@ void nilfs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, nilfs_i_callback);
}
-static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
+static int nilfs_sync_super(struct super_block *sb, int flag)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
int err;
retry:
set_buffer_dirty(nilfs->ns_sbh[0]);
- if (nilfs_test_opt(sbi, BARRIER)) {
+ if (nilfs_test_opt(nilfs, BARRIER)) {
err = __sync_dirty_buffer(nilfs->ns_sbh[0],
WRITE_SYNC | WRITE_FLUSH_FUA);
} else {
@@ -263,10 +262,10 @@ void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
spin_unlock(&nilfs->ns_last_segment_lock);
}
-struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi,
+struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
int flip)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp = nilfs->ns_sbp;
/* nilfs->ns_sem must be locked by the caller. */
@@ -276,7 +275,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi,
memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
} else {
printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
- sbi->s_super->s_id);
+ sb->s_id);
return NULL;
}
} else if (sbp[1] &&
@@ -290,9 +289,9 @@ struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi,
return sbp;
}
-int nilfs_commit_super(struct nilfs_sb_info *sbi, int flag)
+int nilfs_commit_super(struct super_block *sb, int flag)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp = nilfs->ns_sbp;
time_t t;
@@ -312,27 +311,28 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int flag)
nilfs->ns_sbsize));
}
clear_nilfs_sb_dirty(nilfs);
- return nilfs_sync_super(sbi, flag);
+ return nilfs_sync_super(sb, flag);
}
/**
* nilfs_cleanup_super() - write filesystem state for cleanup
- * @sbi: nilfs_sb_info to be unmounted or degraded to read-only
+ * @sb: super block instance to be unmounted or degraded to read-only
*
* This function restores state flags in the on-disk super block.
* This will set the "clean" flag (i.e. NILFS_VALID_FS) unless the
* filesystem was previously marked unclean.
*/
-int nilfs_cleanup_super(struct nilfs_sb_info *sbi)
+int nilfs_cleanup_super(struct super_block *sb)
{
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp;
int flag = NILFS_SB_COMMIT;
int ret = -EIO;
- sbp = nilfs_prepare_super(sbi, 0);
+ sbp = nilfs_prepare_super(sb, 0);
if (sbp) {
- sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state);
- nilfs_set_log_cursor(sbp[0], sbi->s_nilfs);
+ sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+ nilfs_set_log_cursor(sbp[0], nilfs);
if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
/*
* make the "clean" flag also to the opposite
@@ -342,21 +342,20 @@ int nilfs_cleanup_super(struct nilfs_sb_info *sbi)
sbp[1]->s_state = sbp[0]->s_state;
flag = NILFS_SB_COMMIT_ALL;
}
- ret = nilfs_commit_super(sbi, flag);
+ ret = nilfs_commit_super(sb, flag);
}
return ret;
}
static void nilfs_put_super(struct super_block *sb)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
- nilfs_detach_segment_constructor(sbi);
+ nilfs_detach_log_writer(sb);
if (!(sb->s_flags & MS_RDONLY)) {
down_write(&nilfs->ns_sem);
- nilfs_cleanup_super(sbi);
+ nilfs_cleanup_super(sb);
up_write(&nilfs->ns_sem);
}
@@ -365,15 +364,12 @@ static void nilfs_put_super(struct super_block *sb)
iput(nilfs->ns_dat);
destroy_nilfs(nilfs);
- sbi->s_super = NULL;
sb->s_fs_info = NULL;
- kfree(sbi);
}
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp;
int err = 0;
@@ -383,10 +379,10 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
down_write(&nilfs->ns_sem);
if (nilfs_sb_dirty(nilfs)) {
- sbp = nilfs_prepare_super(sbi, nilfs_sb_will_flip(nilfs));
+ sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
if (likely(sbp)) {
nilfs_set_log_cursor(sbp[0], nilfs);
- nilfs_commit_super(sbi, NILFS_SB_COMMIT);
+ nilfs_commit_super(sb, NILFS_SB_COMMIT);
}
}
up_write(&nilfs->ns_sem);
@@ -394,10 +390,10 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
return err;
}
-int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
+int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
struct nilfs_root **rootp)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_root *root;
struct nilfs_checkpoint *raw_cp;
struct buffer_head *bh_cp;
@@ -426,7 +422,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
goto failed;
}
- err = nilfs_ifile_read(sbi->s_super, root, nilfs->ns_inode_size,
+ err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size,
&raw_cp->cp_ifile_inode, &root->ifile);
if (err)
goto failed_bh;
@@ -450,8 +446,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
static int nilfs_freeze(struct super_block *sb)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
int err;
if (sb->s_flags & MS_RDONLY)
@@ -459,21 +454,20 @@ static int nilfs_freeze(struct super_block *sb)
/* Mark super block clean */
down_write(&nilfs->ns_sem);
- err = nilfs_cleanup_super(sbi);
+ err = nilfs_cleanup_super(sb);
up_write(&nilfs->ns_sem);
return err;
}
static int nilfs_unfreeze(struct super_block *sb)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
if (sb->s_flags & MS_RDONLY)
return 0;
down_write(&nilfs->ns_sem);
- nilfs_setup_super(sbi, false);
+ nilfs_setup_super(sb, false);
up_write(&nilfs->ns_sem);
return 0;
}
@@ -530,22 +524,22 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct super_block *sb = vfs->mnt_sb;
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_root *root = NILFS_I(vfs->mnt_root->d_inode)->i_root;
- if (!nilfs_test_opt(sbi, BARRIER))
+ if (!nilfs_test_opt(nilfs, BARRIER))
seq_puts(seq, ",nobarrier");
if (root->cno != NILFS_CPTREE_CURRENT_CNO)
seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
- if (nilfs_test_opt(sbi, ERRORS_PANIC))
+ if (nilfs_test_opt(nilfs, ERRORS_PANIC))
seq_puts(seq, ",errors=panic");
- if (nilfs_test_opt(sbi, ERRORS_CONT))
+ if (nilfs_test_opt(nilfs, ERRORS_CONT))
seq_puts(seq, ",errors=continue");
- if (nilfs_test_opt(sbi, STRICT_ORDER))
+ if (nilfs_test_opt(nilfs, STRICT_ORDER))
seq_puts(seq, ",order=strict");
- if (nilfs_test_opt(sbi, NORECOVERY))
+ if (nilfs_test_opt(nilfs, NORECOVERY))
seq_puts(seq, ",norecovery");
- if (nilfs_test_opt(sbi, DISCARD))
+ if (nilfs_test_opt(nilfs, DISCARD))
seq_puts(seq, ",discard");
return 0;
@@ -594,7 +588,7 @@ static match_table_t tokens = {
static int parse_options(char *options, struct super_block *sb, int is_remount)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sb->s_fs_info;
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -609,29 +603,29 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
token = match_token(p, tokens, args);
switch (token) {
case Opt_barrier:
- nilfs_set_opt(sbi, BARRIER);
+ nilfs_set_opt(nilfs, BARRIER);
break;
case Opt_nobarrier:
- nilfs_clear_opt(sbi, BARRIER);
+ nilfs_clear_opt(nilfs, BARRIER);
break;
case Opt_order:
if (strcmp(args[0].from, "relaxed") == 0)
/* Ordered data semantics */
- nilfs_clear_opt(sbi, STRICT_ORDER);
+ nilfs_clear_opt(nilfs, STRICT_ORDER);
else if (strcmp(args[0].from, "strict") == 0)
/* Strict in-order semantics */
- nilfs_set_opt(sbi, STRICT_ORDER);
+ nilfs_set_opt(nilfs, STRICT_ORDER);
else
return 0;
break;
case Opt_err_panic:
- nilfs_write_opt(sbi, ERROR_MODE, ERRORS_PANIC);
+ nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC);
break;
case Opt_err_ro:
- nilfs_write_opt(sbi, ERROR_MODE, ERRORS_RO);
+ nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO);
break;
case Opt_err_cont:
- nilfs_write_opt(sbi, ERROR_MODE, ERRORS_CONT);
+ nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT);
break;
case Opt_snapshot:
if (is_remount) {
@@ -642,13 +636,13 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
}
break;
case Opt_norecovery:
- nilfs_set_opt(sbi, NORECOVERY);
+ nilfs_set_opt(nilfs, NORECOVERY);
break;
case Opt_discard:
- nilfs_set_opt(sbi, DISCARD);
+ nilfs_set_opt(nilfs, DISCARD);
break;
case Opt_nodiscard:
- nilfs_clear_opt(sbi, DISCARD);
+ nilfs_clear_opt(nilfs, DISCARD);
break;
default:
printk(KERN_ERR
@@ -660,22 +654,24 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
}
static inline void
-nilfs_set_default_options(struct nilfs_sb_info *sbi,
+nilfs_set_default_options(struct super_block *sb,
struct nilfs_super_block *sbp)
{
- sbi->s_mount_opt =
+ struct the_nilfs *nilfs = sb->s_fs_info;
+
+ nilfs->ns_mount_opt =
NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
}
-static int nilfs_setup_super(struct nilfs_sb_info *sbi, int is_mount)
+static int nilfs_setup_super(struct super_block *sb, int is_mount)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp;
int max_mnt_count;
int mnt_count;
/* nilfs->ns_sem must be locked by the caller. */
- sbp = nilfs_prepare_super(sbi, 0);
+ sbp = nilfs_prepare_super(sb, 0);
if (!sbp)
return -EIO;
@@ -706,7 +702,7 @@ skip_mount_setup:
/* synchronize sbp[1] with sbp[0] */
if (sbp[1])
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
- return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
+ return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}
struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
@@ -727,7 +723,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
struct nilfs_super_block *sbp,
char *data)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sb->s_fs_info;
sb->s_magic = le16_to_cpu(sbp->s_magic);
@@ -736,12 +732,12 @@ int nilfs_store_magic_and_option(struct super_block *sb,
sb->s_flags |= MS_NOATIME;
#endif
- nilfs_set_default_options(sbi, sbp);
+ nilfs_set_default_options(sb, sbp);
- sbi->s_resuid = le16_to_cpu(sbp->s_def_resuid);
- sbi->s_resgid = le16_to_cpu(sbp->s_def_resgid);
- sbi->s_interval = le32_to_cpu(sbp->s_c_interval);
- sbi->s_watermark = le32_to_cpu(sbp->s_c_block_max);
+ nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
+ nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
+ nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
+ nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
return !parse_options(data, sb, 0) ? -EINVAL : 0;
}
@@ -822,7 +818,7 @@ static int nilfs_get_root_dentry(struct super_block *sb,
static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
struct dentry **root_dentry)
{
- struct the_nilfs *nilfs = NILFS_SB(s)->s_nilfs;
+ struct the_nilfs *nilfs = s->s_fs_info;
struct nilfs_root *root;
int ret;
@@ -840,7 +836,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
goto out;
}
- ret = nilfs_attach_checkpoint(NILFS_SB(s), cno, false, &root);
+ ret = nilfs_attach_checkpoint(s, cno, false, &root);
if (ret) {
printk(KERN_ERR "NILFS: error loading snapshot "
"(checkpoint number=%llu).\n",
@@ -874,7 +870,7 @@ static int nilfs_try_to_shrink_tree(struct dentry *root_dentry)
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
{
- struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_root *root;
struct inode *inode;
struct dentry *dentry;
@@ -887,7 +883,7 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
return true; /* protect recent checkpoints */
ret = false;
- root = nilfs_lookup_root(NILFS_SB(sb)->s_nilfs, cno);
+ root = nilfs_lookup_root(nilfs, cno);
if (root) {
inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
if (inode) {
@@ -917,43 +913,21 @@ static int
nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct the_nilfs *nilfs;
- struct nilfs_sb_info *sbi;
struct nilfs_root *fsroot;
struct backing_dev_info *bdi;
__u64 cno;
int err;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
+ nilfs = alloc_nilfs(sb->s_bdev);
+ if (!nilfs)
return -ENOMEM;
- sb->s_fs_info = sbi;
- sbi->s_super = sb;
-
- nilfs = alloc_nilfs(sb->s_bdev);
- if (!nilfs) {
- err = -ENOMEM;
- goto failed_sbi;
- }
- sbi->s_nilfs = nilfs;
+ sb->s_fs_info = nilfs;
- err = init_nilfs(nilfs, sbi, (char *)data);
+ err = init_nilfs(nilfs, sb, (char *)data);
if (err)
goto failed_nilfs;
- spin_lock_init(&sbi->s_inode_lock);
- INIT_LIST_HEAD(&sbi->s_dirty_files);
-
- /*
- * Following initialization is overlapped because
- * nilfs_sb_info structure has been cleared at the beginning.
- * But we reserve them to keep our interest and make ready
- * for the future change.
- */
- get_random_bytes(&sbi->s_next_generation,
- sizeof(sbi->s_next_generation));
- spin_lock_init(&sbi->s_next_gen_lock);
-
sb->s_op = &nilfs_sops;
sb->s_export_op = &nilfs_export_ops;
sb->s_root = NULL;
@@ -962,12 +936,12 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
sb->s_bdi = bdi ? : &default_backing_dev_info;
- err = load_nilfs(nilfs, sbi);
+ err = load_nilfs(nilfs, sb);
if (err)
goto failed_nilfs;
cno = nilfs_last_cno(nilfs);
- err = nilfs_attach_checkpoint(sbi, cno, true, &fsroot);
+ err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
if (err) {
printk(KERN_ERR "NILFS: error loading last checkpoint "
"(checkpoint number=%llu).\n", (unsigned long long)cno);
@@ -975,7 +949,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
}
if (!(sb->s_flags & MS_RDONLY)) {
- err = nilfs_attach_segment_constructor(sbi, fsroot);
+ err = nilfs_attach_log_writer(sb, fsroot);
if (err)
goto failed_checkpoint;
}
@@ -988,14 +962,14 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
if (!(sb->s_flags & MS_RDONLY)) {
down_write(&nilfs->ns_sem);
- nilfs_setup_super(sbi, true);
+ nilfs_setup_super(sb, true);
up_write(&nilfs->ns_sem);
}
return 0;
failed_segctor:
- nilfs_detach_segment_constructor(sbi);
+ nilfs_detach_log_writer(sb);
failed_checkpoint:
nilfs_put_root(fsroot);
@@ -1007,23 +981,18 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
failed_nilfs:
destroy_nilfs(nilfs);
-
- failed_sbi:
- sb->s_fs_info = NULL;
- kfree(sbi);
return err;
}
static int nilfs_remount(struct super_block *sb, int *flags, char *data)
{
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sb->s_fs_info;
unsigned long old_sb_flags;
unsigned long old_mount_opt;
int err;
old_sb_flags = sb->s_flags;
- old_mount_opt = sbi->s_mount_opt;
+ old_mount_opt = nilfs->ns_mount_opt;
if (!parse_options(data, sb, 1)) {
err = -EINVAL;
@@ -1043,8 +1012,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
goto out;
if (*flags & MS_RDONLY) {
- /* Shutting down the segment constructor */
- nilfs_detach_segment_constructor(sbi);
+ /* Shutting down log writer */
+ nilfs_detach_log_writer(sb);
sb->s_flags |= MS_RDONLY;
/*
@@ -1052,7 +1021,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
* the RDONLY flag and then mark the partition as valid again.
*/
down_write(&nilfs->ns_sem);
- nilfs_cleanup_super(sbi);
+ nilfs_cleanup_super(sb);
up_write(&nilfs->ns_sem);
} else {
__u64 features;
@@ -1079,12 +1048,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
root = NILFS_I(sb->s_root->d_inode)->i_root;
- err = nilfs_attach_segment_constructor(sbi, root);
+ err = nilfs_attach_log_writer(sb, root);
if (err)
goto restore_opts;
down_write(&nilfs->ns_sem);
- nilfs_setup_super(sbi, true);
+ nilfs_setup_super(sb, true);
up_write(&nilfs->ns_sem);
}
out:
@@ -1092,13 +1061,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
restore_opts:
sb->s_flags = old_sb_flags;
- sbi->s_mount_opt = old_mount_opt;
+ nilfs->ns_mount_opt = old_mount_opt;
return err;
}
struct nilfs_super_data {
struct block_device *bdev;
- struct nilfs_sb_info *sbi;
__u64 cno;
int flags;
};
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ad4ac60..d2acd1a 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
+#include <linux/random.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
@@ -75,7 +76,10 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
nilfs->ns_bdev = bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
+ INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
+ spin_lock_init(&nilfs->ns_inode_lock);
+ spin_lock_init(&nilfs->ns_next_gen_lock);
spin_lock_init(&nilfs->ns_last_segment_lock);
nilfs->ns_cptree = RB_ROOT;
spin_lock_init(&nilfs->ns_cptree_lock);
@@ -197,16 +201,16 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
/**
* load_nilfs - load and recover the nilfs
* @nilfs: the_nilfs structure to be loaded
- * @sbi: nilfs_sb_info used to recover past segment
+ * @sb: super block instance used to recover past segment
*
* load_nilfs() searches and loads the latest super root,
* attaches the last segment, and does recovery if needed.
* The caller must call this exclusively for simultaneous mounts.
*/
-int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
+int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
struct nilfs_recovery_info ri;
- unsigned int s_flags = sbi->s_super->s_flags;
+ unsigned int s_flags = sb->s_flags;
int really_read_only = bdev_read_only(nilfs->ns_bdev);
int valid_fs = nilfs_valid_fs(nilfs);
int err;
@@ -271,7 +275,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto scan_error;
}
- err = nilfs_load_super_root(nilfs, sbi->s_super, ri.ri_super_root);
+ err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: error loading super root.\n");
goto failed;
@@ -283,7 +287,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
if (s_flags & MS_RDONLY) {
__u64 features;
- if (nilfs_test_opt(sbi, NORECOVERY)) {
+ if (nilfs_test_opt(nilfs, NORECOVERY)) {
printk(KERN_INFO "NILFS: norecovery option specified. "
"skipping roll-forward recovery\n");
goto skip_recovery;
@@ -304,21 +308,21 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
err = -EROFS;
goto failed_unload;
}
- sbi->s_super->s_flags &= ~MS_RDONLY;
- } else if (nilfs_test_opt(sbi, NORECOVERY)) {
+ sb->s_flags &= ~MS_RDONLY;
+ } else if (nilfs_test_opt(nilfs, NORECOVERY)) {
printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
"option was specified for a read/write mount\n");
err = -EINVAL;
goto failed_unload;
}
- err = nilfs_salvage_orphan_logs(nilfs, sbi, &ri);
+ err = nilfs_salvage_orphan_logs(nilfs, sb, &ri);
if (err)
goto failed_unload;
down_write(&nilfs->ns_sem);
nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
- err = nilfs_cleanup_super(sbi);
+ err = nilfs_cleanup_super(sb);
up_write(&nilfs->ns_sem);
if (err) {
@@ -330,7 +334,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
skip_recovery:
nilfs_clear_recovery_info(&ri);
- sbi->s_super->s_flags = s_flags;
+ sb->s_flags = s_flags;
return 0;
scan_error:
@@ -344,7 +348,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
failed:
nilfs_clear_recovery_info(&ri);
- sbi->s_super->s_flags = s_flags;
+ sb->s_flags = s_flags;
return err;
}
@@ -475,10 +479,13 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
return -EIO;
}
printk(KERN_WARNING
- "NILFS warning: unable to read primary superblock\n");
- } else if (!sbp[1])
+ "NILFS warning: unable to read primary superblock "
+ "(blocksize = %d)\n", blocksize);
+ } else if (!sbp[1]) {
printk(KERN_WARNING
- "NILFS warning: unable to read secondary superblock\n");
+ "NILFS warning: unable to read secondary superblock "
+ "(blocksize = %d)\n", blocksize);
+ }
/*
* Compare two super blocks and set 1 in swp if the secondary
@@ -505,7 +512,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
if (!valid[!swp])
printk(KERN_WARNING "NILFS warning: broken superblock. "
- "using spare superblock.\n");
+ "using spare superblock (blocksize = %d).\n", blocksize);
if (swp)
nilfs_swap_super_block(nilfs);
@@ -519,7 +526,6 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
/**
* init_nilfs - initialize a NILFS instance.
* @nilfs: the_nilfs structure
- * @sbi: nilfs_sb_info
* @sb: super block
* @data: mount options
*
@@ -530,9 +536,8 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
* Return Value: On success, 0 is returned. On error, a negative error
* code is returned.
*/
-int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
{
- struct super_block *sb = sbi->s_super;
struct nilfs_super_block *sbp;
int blocksize;
int err;
@@ -588,6 +593,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
nilfs->ns_blocksize = blocksize;
+ get_random_bytes(&nilfs->ns_next_generation,
+ sizeof(nilfs->ns_next_generation));
+
err = nilfs_store_disk_layout(nilfs, sbp);
if (err)
goto failed_sbh;
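Two of the additions above work together: alloc_nilfs() now initializes ns_next_gen_lock, and init_nilfs() seeds ns_next_generation with get_random_bytes(), giving each filesystem a randomized, lock-protected counter for inode generation numbers. A user-space C model of that idea, as a sketch only (a pthread mutex and srand()/rand() stand in for the spinlock and get_random_bytes()):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    struct fs_ctx {
        uint32_t next_generation;       /* counter handed to new inodes */
        pthread_mutex_t next_gen_lock;  /* keeps the increment atomic */
    };

    static void fs_init(struct fs_ctx *fs)
    {
        srand((unsigned)time(NULL));            /* stand-in for get_random_bytes() */
        fs->next_generation = (uint32_t)rand(); /* random starting point */
        pthread_mutex_init(&fs->next_gen_lock, NULL);
    }

    static uint32_t fs_new_generation(struct fs_ctx *fs)
    {
        pthread_mutex_lock(&fs->next_gen_lock);
        uint32_t gen = fs->next_generation++;
        pthread_mutex_unlock(&fs->next_gen_lock);
        return gen;
    }

    int main(void)
    {
        struct fs_ctx fs;
        fs_init(&fs);
        uint32_t g0 = fs_new_generation(&fs);
        uint32_t g1 = fs_new_generation(&fs);
        printf("gen0=%u gen1=%u\n", g0, g1);
        return 0;
    }

Starting from a random value is commonly done so that file handles minted before a remount do not accidentally stay valid for a reused inode number afterwards.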
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index fd85e4c..f496814 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -31,7 +31,8 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
-#include "sb.h"
+
+struct nilfs_sc_info;
/* the_nilfs struct */
enum {
@@ -65,13 +66,23 @@ enum {
* @ns_last_cno: checkpoint number of the latest segment
* @ns_prot_seq: least sequence number of segments which must not be reclaimed
* @ns_prev_seq: base sequence number used to decide if advance log cursor
- * @ns_segctor_sem: segment constructor semaphore
+ * @ns_writer: log writer
+ * @ns_segctor_sem: semaphore protecting log write
* @ns_dat: DAT file inode
* @ns_cpfile: checkpoint file inode
* @ns_sufile: segusage file inode
* @ns_cptree: rb-tree of all mounted checkpoints (nilfs_root)
* @ns_cptree_lock: lock protecting @ns_cptree
+ * @ns_dirty_files: list of dirty files
+ * @ns_inode_lock: lock protecting @ns_dirty_files
* @ns_gc_inodes: dummy inodes to keep live blocks
+ * @ns_next_generation: next generation number for inodes
+ * @ns_next_gen_lock: lock protecting @ns_next_generation
+ * @ns_mount_opt: mount options
+ * @ns_resuid: uid for reserved blocks
+ * @ns_resgid: gid for reserved blocks
+ * @ns_interval: checkpoint creation interval
+ * @ns_watermark: watermark for the number of dirty buffers
* @ns_blocksize_bits: bit length of block size
* @ns_blocksize: block size
* @ns_nsegments: number of segments in filesystem
@@ -131,6 +142,7 @@ struct the_nilfs {
u64 ns_prot_seq;
u64 ns_prev_seq;
+ struct nilfs_sc_info *ns_writer;
struct rw_semaphore ns_segctor_sem;
/*
@@ -145,9 +157,25 @@ struct the_nilfs {
struct rb_root ns_cptree;
spinlock_t ns_cptree_lock;
+ /* Dirty inode list */
+ struct list_head ns_dirty_files;
+ spinlock_t ns_inode_lock;
+
/* GC inode list */
struct list_head ns_gc_inodes;
+ /* Inode allocator */
+ u32 ns_next_generation;
+ spinlock_t ns_next_gen_lock;
+
+ /* Mount options */
+ unsigned long ns_mount_opt;
+
+ uid_t ns_resuid;
+ gid_t ns_resgid;
+ unsigned long ns_interval;
+ unsigned long ns_watermark;
+
/* Disk layout information (static) */
unsigned int ns_blocksize_bits;
unsigned int ns_blocksize;
@@ -180,6 +208,20 @@ THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+/*
+ * Mount option operations
+ */
+#define nilfs_clear_opt(nilfs, opt) \
+ do { (nilfs)->ns_mount_opt &= ~NILFS_MOUNT_##opt; } while (0)
+#define nilfs_set_opt(nilfs, opt) \
+ do { (nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt; } while (0)
+#define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt)
+#define nilfs_write_opt(nilfs, mask, opt) \
+ do { (nilfs)->ns_mount_opt = \
+ (((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) | \
+ NILFS_MOUNT_##opt); \
+ } while (0)
+
/**
* struct nilfs_root - nilfs root object
* @cno: checkpoint number
@@ -224,15 +266,14 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
struct the_nilfs *alloc_nilfs(struct block_device *bdev);
void destroy_nilfs(struct the_nilfs *nilfs);
-int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
-int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
+int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno);
struct nilfs_root *nilfs_find_or_create_root(struct the_nilfs *nilfs,
__u64 cno);
void nilfs_put_root(struct nilfs_root *root);
-struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
int nilfs_near_disk_full(struct the_nilfs *);
void nilfs_fall_back_super_block(struct the_nilfs *);
void nilfs_swap_super_block(struct the_nilfs *);
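The nilfs_*_opt() macros above replace the old sb_info-based helpers now that the mount-option bitmask lives in the_nilfs; nilfs_write_opt() is the interesting one, clearing an entire multi-bit field before installing the new value. A small self-contained C demonstration of the same macro pattern (the flag names and values are made up for the example):

    #include <stdio.h>

    #define FS_MOUNT_NORECOVERY  0x0001
    #define FS_MOUNT_ERROR_MODE  0x0070  /* a multi-bit field (the mask) */
    #define FS_MOUNT_ERRORS_RO   0x0010  /* one value within that field */

    struct fs { unsigned long mount_opt; };

    #define fs_set_opt(f, opt)   do { (f)->mount_opt |= FS_MOUNT_##opt; } while (0)
    #define fs_clear_opt(f, opt) do { (f)->mount_opt &= ~FS_MOUNT_##opt; } while (0)
    #define fs_test_opt(f, opt)  ((f)->mount_opt & FS_MOUNT_##opt)
    #define fs_write_opt(f, mask, opt) \
        do { (f)->mount_opt = \
            (((f)->mount_opt & ~FS_MOUNT_##mask) | FS_MOUNT_##opt); \
        } while (0)

    int main(void)
    {
        struct fs f = { 0 };

        fs_set_opt(&f, NORECOVERY);
        fs_write_opt(&f, ERROR_MODE, ERRORS_RO);  /* replace the whole field */
        printf("norecovery=%d opts=0x%lx\n",
               !!fs_test_opt(&f, NORECOVERY), f.mount_opt);
        fs_clear_opt(&f, NORECOVERY);
        return 0;
    }

The token pasting (##) keeps call sites short while guaranteeing that only flags from the NILFS_MOUNT_ namespace can be passed in.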
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 6b1305d..9fde1c0 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -164,7 +164,7 @@ static int process_access_response(struct fsnotify_group *group,
fd, response);
/*
* make sure the response is valid, if invalid we do nothing and either
- * userspace can send a valid responce or we will clean it up after the
+ * userspace can send a valid response or we will clean it up after the
* timeout
*/
switch (response) {
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 4c29fcf..07ea8d3 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -22,13 +22,14 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
+#include "../internal.h"
+
/*
* Recalculate the mask of events relevant to a given locked inode.
*/
@@ -237,15 +238,14 @@ out:
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes)
*
- * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
- * We temporarily drop inode_lock, however, and CAN block.
+ * Called during unmount with no locks held, so it needs to be safe against
+ * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
*/
void fsnotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp;
@@ -254,8 +254,11 @@ void fsnotify_unmount_inodes(struct list_head *list)
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
/*
* If i_count is zero, the inode cannot have any watches and
@@ -263,8 +266,10 @@ void fsnotify_unmount_inodes(struct list_head *list)
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
- if (!atomic_read(&inode->i_count))
+ if (!atomic_read(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
need_iput_tmp = need_iput;
need_iput = NULL;
@@ -274,22 +279,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
__iget(inode);
else
need_iput_tmp = NULL;
+ spin_unlock(&inode->i_lock);
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
- atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
- __iget(next_i);
- need_iput = next_i;
+ atomic_read(&next_i->i_count)) {
+ spin_lock(&next_i->i_lock);
+ if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+ __iget(next_i);
+ need_iput = next_i;
+ }
+ spin_unlock(&next_i->i_lock);
}
/*
- * We can safely drop inode_lock here because we hold
+ * We can safely drop inode_sb_list_lock here because we hold
* references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
+ * will be added since the umount has begun.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
if (need_iput_tmp)
iput(need_iput_tmp);
@@ -301,7 +309,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
iput(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
}
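The fsnotify_unmount_inodes() rework above is an instance of a general pattern: walk a lock-protected list, pin both the current element and its successor with reference counts before dropping the list lock for blocking work, then retake the lock to advance. A compressed user-space C sketch of that pattern (pthread mutexes stand in for the spinlocks, and the I_FREEING-style state checks are omitted):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        struct node *next;
        int refcount;               /* guarded by obj_lock */
        pthread_mutex_t obj_lock;
    };

    /* protects list membership: removal requires holding list_lock */
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void pin(struct node *n)
    {
        pthread_mutex_lock(&n->obj_lock);
        n->refcount++;
        pthread_mutex_unlock(&n->obj_lock);
    }

    static void unpin(struct node *n)
    {
        pthread_mutex_lock(&n->obj_lock);
        n->refcount--;              /* real code would free at zero */
        pthread_mutex_unlock(&n->obj_lock);
    }

    static void walk(struct node *head, void (*work)(struct node *))
    {
        pthread_mutex_lock(&list_lock);
        for (struct node *n = head; n; ) {
            struct node *next = n->next;

            pin(n);                 /* keep the current element alive */
            if (next)
                pin(next);          /* ... and the one we advance to */

            pthread_mutex_unlock(&list_lock);
            work(n);                /* may block; the list may change */
            unpin(n);
            pthread_mutex_lock(&list_lock);

            n = next;
            if (next)
                unpin(next);        /* safe: list_lock is held again */
        }
        pthread_mutex_unlock(&list_lock);
    }

    static void visit(struct node *n) { printf("visit %p\n", (void *)n); }

    int main(void)
    {
        struct node c = { NULL, 0, PTHREAD_MUTEX_INITIALIZER };
        struct node b = { &c, 0, PTHREAD_MUTEX_INITIALIZER };
        struct node a = { &b, 0, PTHREAD_MUTEX_INITIALIZER };

        walk(&a, visit);
        return 0;
    }

The kernel version also rechecks i_state under i_lock on every iteration, because an inode can begin teardown between drops of the list lock.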
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index a91b69a..e3cbd74 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -194,10 +194,11 @@ static int idr_callback(int id, void *p, void *data)
static void inotify_free_group_priv(struct fsnotify_group *group)
{
- /* ideally the idr is empty and we won't hit the BUG in teh callback */
+ /* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
+ atomic_dec(&group->inotify_data.user->inotify_devs);
free_uid(group->inotify_data.user);
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index bd46e7c..8445fbc 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on)
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
- struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
@@ -299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file)
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
- atomic_dec(&user->inotify_devs);
-
return 0;
}
@@ -697,7 +694,7 @@ retry:
return ret;
}
-static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
+static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
struct fsnotify_group *group;
@@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
- group->inotify_data.user = user;
group->inotify_data.fa = NULL;
+ group->inotify_data.user = get_current_user();
+
+ if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
+ inotify_max_user_instances) {
+ fsnotify_put_group(group);
+ return ERR_PTR(-EMFILE);
+ }
return group;
}
@@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
- struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
@@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
- user = get_current_user();
- if (unlikely(atomic_read(&user->inotify_devs) >=
- inotify_max_user_instances)) {
- ret = -EMFILE;
- goto out_free_uid;
- }
-
/* fsnotify_obtain_group took a reference to group; we put it when we kill the file in the end */
- group = inotify_new_group(user, inotify_max_queued_events);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto out_free_uid;
- }
-
- atomic_inc(&user->inotify_devs);
+ group = inotify_new_group(inotify_max_queued_events);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
- if (ret >= 0)
- return ret;
+ if (ret < 0)
+ fsnotify_put_group(group);
- fsnotify_put_group(group);
- atomic_dec(&user->inotify_devs);
-out_free_uid:
- free_uid(user);
return ret;
}
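With the per-user accounting folded into inotify_new_group() (and the matching atomic_dec() moved into inotify_free_group_priv()), the limit check becomes increment-then-test: bump the counter first, and if it exceeded the limit, drop the group so the ordinary teardown path undoes the increment. A runnable user-space C model of that idiom using C11 atomics (the constant and function names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_INSTANCES 4          /* stand-in for inotify_max_user_instances */

    static atomic_int instances;     /* stand-in for user->inotify_devs */

    /* returns 0 on success, -1 when the per-user limit would be exceeded */
    static int new_instance(void)
    {
        /* increment first, then test: the failure path is just the
         * same decrement that normal teardown performs */
        if (atomic_fetch_add(&instances, 1) + 1 > MAX_INSTANCES) {
            atomic_fetch_sub(&instances, 1);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < MAX_INSTANCES + 2; i++)
            if (new_instance() < 0)
                printf("instance %d rejected (limit %d)\n", i, MAX_INSTANCES);
        return 0;
    }

Tying both the increment and the decrement to the group's own lifetime keeps every fsnotify_put_group() path balanced automatically, which is why the error path in inotify_init1() can shrink to a single put.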
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 325185e..252ab1f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -24,7 +24,7 @@
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
* which can find this object holding the appropriate locks can take a reference
- * and the object itself is guarenteed to survive until the reference is dropped.
+ * and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
@@ -91,7 +91,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 85eebff..e86577d 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -23,7 +23,6 @@
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 4ff028f..30206b2 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -2,18 +2,13 @@
obj-$(CONFIG_NTFS_FS) += ntfs.o
-ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
- index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
- unistr.o upcase.o
+ntfs-y := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
+ index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
+ unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
+ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
-ifeq ($(CONFIG_NTFS_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-y := -DNTFS_VERSION=\"2.1.30\"
+ccflags-$(CONFIG_NTFS_DEBUG) += -DDEBUG
+ccflags-$(CONFIG_NTFS_RW) += -DNTFS_RW
-ifeq ($(CONFIG_NTFS_RW),y)
-EXTRA_CFLAGS += -DNTFS_RW
-
-ntfs-objs += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
-endif
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index c3c2c7a..0b1e885b 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1543,8 +1543,6 @@ err_out:
*/
const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
#endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
*/
const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index f5094ee..f14fde2 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -197,7 +197,7 @@ err_out:
} else if (ctx_needs_reset) {
/*
* If there is no attribute list, restoring the search context
- * is acomplished simply by copying the saved context back over
+ * is accomplished simply by copying the saved context back over
* the caller supplied context. If there is an attribute list,
* things are more complicated as we need to deal with mapping
* of mft records and resulting potential changes in pointers.
@@ -1181,7 +1181,7 @@ not_found:
* for, i.e. if one wants to add the attribute to the mft record this is the
* correct place to insert its attribute list entry into.
*
- * When -errno != -ENOENT, an error occured during the lookup. @ctx->attr is
+ * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
* then undefined and in particular you should not rely on it not changing.
*/
int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 6551c7c..ee4144c 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -501,7 +501,7 @@ int ntfs_read_compressed_block(struct page *page)
VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
- * The first vcn after the last wanted vcn (minumum alignment is again
+ * The first vcn after the last wanted vcn (minimum alignment is again
* PAGE_CACHE_SIZE).
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
@@ -698,8 +698,7 @@ lock_retry_remap:
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
- blk_run_address_space(mapping);
- schedule();
+ io_schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index a627ed8..c05d6dc 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -54,7 +54,7 @@
*
* Return 1 if the attributes match and 0 if not.
*
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
* allowed to sleep.
*/
int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
@@ -98,7 +98,7 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
*
* Return 0 on success and -errno on error.
*
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
* allowed to sleep. (Hence the GFP_ATOMIC allocation.)
*/
static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
@@ -622,7 +622,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
*/
/* Everyone gets all permissions. */
vi->i_mode |= S_IRWXUGO;
- /* If read-only, noone gets write permissions. */
+ /* If read-only, no one gets write permissions. */
if (IS_RDONLY(vi))
vi->i_mode &= ~S_IWUGO;
if (m->flags & MFT_RECORD_IS_DIRECTORY) {
@@ -2529,7 +2529,7 @@ retry_truncate:
* specifies that the behaviour is unspecified thus we do not
* have to do anything. This means that in our implementation
* in the rare case that the file is mmap()ped and a write
- * occured into the mmap()ped region just beyond the file size
+ * occurred into the mmap()ped region just beyond the file size
* and writepage has not yet been called to write out the page
* (which would clear the area beyond the file size) and we now
* extend the file size to incorporate this dirty region
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 8b2549f..faece71 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -286,7 +286,7 @@ typedef le16 MFT_RECORD_FLAGS;
* fragmented. Volume free space includes the empty part of the mft zone and
* when the volume's free 88% are used up, the mft zone is shrunk by a factor
* of 2, thus making more space available for more files/data. This process is
- * repeated everytime there is no more free space except for the mft zone until
+ * repeated every time there is no more free space except for the mft zone until
* there really is no more free space.
*/
@@ -1657,13 +1657,13 @@ typedef enum {
* pointed to by the Owner field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the SID with
- * respect to inheritence of an owner.
+ * respect to inheritance of an owner.
*
* SE_GROUP_DEFAULTED - This boolean flag, when set, indicates that the SID in
* the Group field was provided by a defaulting mechanism rather than
* explicitly provided by the original provider of the security
* descriptor. This may affect the treatment of the SID with respect to
- * inheritence of a primary group.
+ * inheritance of a primary group.
*
* SE_DACL_PRESENT - This boolean flag, when set, indicates that the security
* descriptor contains a discretionary ACL. If this flag is set and the
@@ -1674,7 +1674,7 @@ typedef enum {
* pointed to by the Dacl field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the ACL with
- * respect to inheritence of an ACL. This flag is ignored if the
+ * respect to inheritance of an ACL. This flag is ignored if the
* DaclPresent flag is not set.
*
* SE_SACL_PRESENT - This boolean flag, when set, indicates that the security
@@ -1686,7 +1686,7 @@ typedef enum {
* pointed to by the Sacl field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the ACL with
- * respect to inheritence of an ACL. This flag is ignored if the
+ * respect to inheritance of an ACL. This flag is ignored if the
* SaclPresent flag is not set.
*
* SE_SELF_RELATIVE - This boolean flag, when set, indicates that the security
@@ -2283,7 +2283,7 @@ typedef struct {
// the key_length is zero, then the vcn immediately
// follows the INDEX_ENTRY_HEADER. Regardless of
// key_length, the address of the 8-byte boundary
- // alligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
+ // aligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
// (char*)ie + le16_to_cpu(ie->length) - sizeof(VCN),
// where sizeof(VCN) can be hardcoded as 8 if wanted. */
} __attribute__ ((__packed__)) INDEX_ENTRY;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 4dadcdf..c71de29 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -669,7 +669,7 @@ err_out:
* of cases where we think that a volume is dirty when in fact it is clean.
* This should only affect volumes that have not been shutdown cleanly but did
* not have any pending, non-check-pointed i/o, i.e. they were completely idle
- * at least for the five seconds preceeding the unclean shutdown.
+ * at least for the five seconds preceding the unclean shutdown.
*
* This function assumes that the $LogFile journal has already been consistency
* checked by a call to ntfs_check_logfile() and in particular if the $LogFile
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
index b5a6f08..aa2b6ac 100644
--- a/fs/ntfs/logfile.h
+++ b/fs/ntfs/logfile.h
@@ -222,7 +222,7 @@ typedef struct {
/* 24*/ sle64 file_size; /* Usable byte size of the log file. If the
restart_area_offset + the offset of the
file_size is > 510 then corruption has
- occured. This is the very first check when
+ occurred. This is the very first check when
starting with the restart_area as if it
fails it means that some of the above values
will be corrupted by the multi sector
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 326e747..382857f 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -73,7 +73,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
vol->mft_record_size) {
page = ERR_PTR(-ENOENT);
- ntfs_error(vol->sb, "Attemt to read mft record 0x%lx, "
+ ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
"which is beyond the end of the mft. "
"This is probably a bug in the ntfs "
"driver.", ni->mft_no);
@@ -1442,7 +1442,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
// Note: It will need to be a special mft record and if none of
// those are available it gets rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
- "accomodate extended mft bitmap attribute "
+ "accommodate extended mft bitmap attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
@@ -1879,7 +1879,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
// and we would then need to update all references to this mft
// record appropriately. This is rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
- "accomodate extended mft data attribute "
+ "accommodate extended mft data attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
@@ -2357,7 +2357,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
}
#ifdef DEBUG
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
- ntfs_debug("Status of mftbmp after initialized extention: "
+ ntfs_debug("Status of mftbmp after initialized extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mftbmp_ni->allocated_size,
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 56a9a6d2..eac7d67 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -1243,7 +1243,7 @@ err_out:
* write.
*
* This is used when building the mapping pairs array of a runlist to compress
- * a given logical cluster number (lcn) or a specific run length to the minumum
+ * a given logical cluster number (lcn) or a specific run length to the minimum
* size possible.
*
* Return the number of bytes written on success. On error, i.e. the
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29099a0..b52706d 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -458,7 +458,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
* the volume on boot and updates them.
*
* When remounting read-only, mark the volume clean if no volume errors
- * have occured.
+ * have occurred.
*/
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
@@ -1269,7 +1269,7 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
"hibernated on the volume.");
return 0;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"hiberfil.sys.");
return ret;
@@ -1370,7 +1370,7 @@ static bool load_and_init_quota(ntfs_volume *vol)
NVolSetQuotaOutOfDate(vol);
return true;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
return false;
}
@@ -1454,7 +1454,7 @@ not_enabled:
NVolSetUsnJrnlStamped(vol);
return true;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"$UsnJrnl.");
return false;
@@ -2292,7 +2292,7 @@ static void ntfs_put_super(struct super_block *sb)
ntfs_commit_inode(vol->mft_ino);
/*
- * If a read-write mount and no volume errors have occured, mark the
+ * If a read-write mount and no volume errors have occurred, mark the
* volume clean. Also, re-commit all affected inodes.
*/
if (!(sb->s_flags & MS_RDONLY)) {
@@ -2496,7 +2496,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
if (vol->nr_clusters & 63)
nr_free += 64 - (vol->nr_clusters & 63);
up_read(&vol->lcnbmp_lock);
- /* If errors occured we may well have gone below zero, fix this. */
+ /* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
@@ -2561,7 +2561,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index - 1);
- /* If errors occured we may well have gone below zero, fix this. */
+ /* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 07d9fd8..d8a0313 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -1,6 +1,6 @@
-EXTRA_CFLAGS += -Ifs/ocfs2
+ccflags-y := -Ifs/ocfs2
-EXTRA_CFLAGS += -DCATCH_BH_JBD_RACES
+ccflags-y += -DCATCH_BH_JBD_RACES
obj-$(CONFIG_OCFS2_FS) += \
ocfs2.o \
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 704f6b1..e913ad1 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/string.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -497,7 +496,7 @@ static int ocfs2_xattr_set_acl(struct dentry *dentry, const char *name,
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index e4984e2..48aa9c7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -30,7 +30,6 @@
#include <linux/swap.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -50,6 +49,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -886,8 +886,7 @@ static int ocfs2_validate_extent_block(struct super_block *sb,
struct ocfs2_extent_block *eb =
(struct ocfs2_extent_block *)bh->b_data;
- mlog(0, "Validating extent block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -965,8 +964,6 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
struct buffer_head *eb_bh = NULL;
u64 last_eb_blk = 0;
- mlog_entry_void();
-
el = et->et_root_el;
last_eb_blk = ocfs2_et_get_last_eb_blk(et);
@@ -987,7 +984,7 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
bail:
brelse(eb_bh);
- mlog_exit(retval);
+ trace_ocfs2_num_free_extents(retval);
return retval;
}
@@ -1010,8 +1007,6 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
struct ocfs2_extent_block *eb;
- mlog_entry_void();
-
count = 0;
while (count < wanted) {
status = ocfs2_claim_metadata(handle,
@@ -1074,8 +1069,8 @@ bail:
brelse(bhs[i]);
bhs[i] = NULL;
}
+ mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -1173,8 +1168,6 @@ static int ocfs2_add_branch(handle_t *handle,
struct ocfs2_extent_list *el;
u32 new_cpos, root_end;
- mlog_entry_void();
-
BUG_ON(!last_eb_bh || !*last_eb_bh);
if (eb_bh) {
@@ -1200,8 +1193,11 @@ static int ocfs2_add_branch(handle_t *handle,
* from new_cpos).
*/
if (root_end > new_cpos) {
- mlog(0, "adjust the cluster end from %u to %u\n",
- root_end, new_cpos);
+ trace_ocfs2_adjust_rightmost_branch(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ root_end, new_cpos);
+
status = ocfs2_adjust_rightmost_branch(handle, et);
if (status) {
mlog_errno(status);
@@ -1332,7 +1328,6 @@ bail:
kfree(new_eb_bhs);
}
- mlog_exit(status);
return status;
}
@@ -1353,8 +1348,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
struct ocfs2_extent_list *root_el;
struct ocfs2_extent_list *eb_el;
- mlog_entry_void();
-
status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
&new_eb_bh);
if (status < 0) {
@@ -1415,7 +1408,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
bail:
brelse(new_eb_bh);
- mlog_exit(status);
return status;
}
@@ -1446,8 +1438,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
struct buffer_head *bh = NULL;
struct buffer_head *lowest_bh = NULL;
- mlog_entry_void();
-
*target_bh = NULL;
el = et->et_root_el;
@@ -1503,7 +1493,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
bail:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -1540,7 +1529,10 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
* another tree level */
if (shift) {
BUG_ON(bh);
- mlog(0, "need to shift tree depth (current = %d)\n", depth);
+ trace_ocfs2_grow_tree(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ depth);
/* ocfs2_shift_tree_depth will return us a buffer with
* the new extent block (so we can pass that to
@@ -1570,7 +1562,6 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
/* call ocfs2_add_branch to add the final part of the tree with
* the new data. */
- mlog(0, "add branch. bh = %p\n", bh);
ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
meta_ac);
if (ret < 0) {
@@ -1645,8 +1636,9 @@ static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
}
insert_index = i;
- mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
- insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));
+ trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
+ has_empty, next_free,
+ le16_to_cpu(el->l_count));
BUG_ON(insert_index < 0);
BUG_ON(insert_index >= le16_to_cpu(el->l_count));
@@ -2059,7 +2051,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
left_el = path_leaf_el(left_path);
right_el = path_leaf_el(right_path);
for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
- mlog(0, "Adjust records at index %u\n", i);
+ trace_ocfs2_complete_edge_insert(i);
/*
* One nice property of knowing that all of these
@@ -2389,7 +2381,9 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
goto out;
}
- mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);
+ trace_ocfs2_rotate_tree_right(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ insert_cpos, cpos);
/*
* What we want to do here is:
@@ -2418,8 +2412,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
* rotating subtrees.
*/
while (cpos && insert_cpos <= cpos) {
- mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
- insert_cpos, cpos);
+ trace_ocfs2_rotate_tree_right(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ insert_cpos, cpos);
ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
@@ -2461,10 +2457,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
start = ocfs2_find_subtree_root(et, left_path, right_path);
- mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
- start,
- (unsigned long long) right_path->p_node[start].bh->b_blocknr,
- right_path->p_tree_depth);
+ trace_ocfs2_rotate_subtree(start,
+ (unsigned long long)
+ right_path->p_node[start].bh->b_blocknr,
+ right_path->p_tree_depth);
ret = ocfs2_extend_rotate_transaction(handle, start,
orig_credits, right_path);
@@ -2964,8 +2960,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle,
subtree_root = ocfs2_find_subtree_root(et, left_path,
right_path);
- mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
- subtree_root,
+ trace_ocfs2_rotate_subtree(subtree_root,
(unsigned long long)
right_path->p_node[subtree_root].bh->b_blocknr,
right_path->p_tree_depth);
@@ -3989,9 +3984,11 @@ static int ocfs2_append_rec_to_path(handle_t *handle,
goto out;
}
- mlog(0, "Append may need a left path update. cpos: %u, "
- "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
- left_cpos);
+ trace_ocfs2_append_rec_to_path(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ le32_to_cpu(insert_rec->e_cpos),
+ left_cpos);
/*
* No need to worry if the append is already in the
@@ -4522,7 +4519,7 @@ set_tail_append:
}
/*
- * Helper function called at the begining of an insert.
+ * Helper function called at the beginning of an insert.
*
* This computes a few things that are commonly used in the process of
* inserting into the btree:
@@ -4562,7 +4559,7 @@ static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
ocfs2_et_get_last_eb_blk(et),
&bh);
if (ret) {
- mlog_exit(ret);
+ mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) bh->b_data;
@@ -4678,9 +4675,9 @@ int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
- mlog(0, "add %u clusters at position %u to owner %llu\n",
- new_clusters, cpos,
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
+ trace_ocfs2_insert_extent_start(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos, new_clusters);
memset(&rec, 0, sizeof(rec));
rec.e_cpos = cpu_to_le32(cpos);
@@ -4700,11 +4697,9 @@ int ocfs2_insert_extent(handle_t *handle,
goto bail;
}
- mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
- "Insert.contig_index: %d, Insert.free_records: %d, "
- "Insert.tree_depth: %d\n",
- insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
- free_records, insert.ins_tree_depth);
+ trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
+ insert.ins_contig_index, free_records,
+ insert.ins_tree_depth);
if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
status = ocfs2_grow_tree(handle, et,
@@ -4726,7 +4721,6 @@ int ocfs2_insert_extent(handle_t *handle,
bail:
brelse(last_eb_bh);
- mlog_exit(status);
return status;
}
@@ -4746,7 +4740,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret)
{
- int status = 0;
+ int status = 0, err = 0;
int free_extents;
enum ocfs2_alloc_restarted reason = RESTART_NONE;
u32 bit_off, num_bits;
@@ -4773,14 +4767,14 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
* 2) we are so fragmented, we've needed to add metadata too
* many times. */
if (!free_extents && !meta_ac) {
- mlog(0, "we haven't reserved any metadata!\n");
+ err = -1;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
} else if ((!free_extents)
&& (ocfs2_alloc_context_bits_left(meta_ac)
< ocfs2_extend_meta_needed(et->et_root_el))) {
- mlog(0, "filesystem is really fragmented...\n");
+ err = -2;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
@@ -4805,9 +4799,9 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "Allocating %u clusters at block %u for owner %llu\n",
- num_bits, bit_off,
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
+ trace_ocfs2_add_clusters_in_btree(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ bit_off, num_bits);
status = ocfs2_insert_extent(handle, et, *logical_offset, block,
num_bits, flags, meta_ac);
if (status < 0) {
@@ -4821,16 +4815,15 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
*logical_offset += num_bits;
if (clusters_to_add) {
- mlog(0, "need to alloc once more, wanted = %u\n",
- clusters_to_add);
+ err = clusters_to_add;
status = -EAGAIN;
reason = RESTART_TRANS;
}
leave:
- mlog_exit(status);
if (reason_ret)
*reason_ret = reason;
+ trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
return status;
}
@@ -5039,7 +5032,7 @@ int ocfs2_split_extent(handle_t *handle,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret) {
- mlog_exit(ret);
+ mlog_errno(ret);
goto out;
}
@@ -5056,9 +5049,9 @@ int ocfs2_split_extent(handle_t *handle,
ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
- mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
- split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
- ctxt.c_split_covers_rec);
+ trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
+ ctxt.c_has_empty_extent,
+ ctxt.c_split_covers_rec);
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
@@ -5192,8 +5185,9 @@ int ocfs2_mark_extent_written(struct inode *inode,
{
int ret;
- mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n",
- inode->i_ino, cpos, len, phys);
+ trace_ocfs2_mark_extent_written(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ cpos, len, phys);
if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
@@ -5512,11 +5506,10 @@ int ocfs2_remove_extent(handle_t *handle,
BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
- mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d "
- "(cpos %u, len %u)\n",
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
- cpos, len, index,
- le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
+ trace_ocfs2_remove_extent(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos, len, index, le32_to_cpu(rec->e_cpos),
+ ocfs2_rec_clusters(el, rec));
if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
@@ -5795,9 +5788,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- mlog_entry("start_blk = %llu, num_clusters = %u\n",
- (unsigned long long)start_blk, num_clusters);
-
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
@@ -5834,10 +5824,9 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
goto bail;
}
- mlog(0, "Log truncate of %u clusters starting at cluster %u to "
- "%llu (index = %d)\n", num_clusters, start_cluster,
- (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index);
-
+ trace_ocfs2_truncate_log_append(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
+ start_cluster, num_clusters);
if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
/*
* Move index back to the record we are coalescing with.
@@ -5846,9 +5835,10 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
index--;
num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
- mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
- index, le32_to_cpu(tl->tl_recs[index].t_start),
- num_clusters);
+ trace_ocfs2_truncate_log_append(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ index, le32_to_cpu(tl->tl_recs[index].t_start),
+ num_clusters);
} else {
tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
tl->tl_used = cpu_to_le16(index + 1);
@@ -5859,7 +5849,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
osb->truncated_clusters += num_clusters;
bail:
- mlog_exit(status);
return status;
}
@@ -5878,8 +5867,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
struct inode *tl_inode = osb->osb_tl_inode;
struct buffer_head *tl_bh = osb->osb_tl_bh;
- mlog_entry_void();
-
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
i = le16_to_cpu(tl->tl_used) - 1;
@@ -5915,8 +5902,9 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
/* if start_blk is not set, we ignore the record as
* invalid. */
if (start_blk) {
- mlog(0, "free record %d, start = %u, clusters = %u\n",
- i, le32_to_cpu(rec.t_start), num_clusters);
+ trace_ocfs2_replay_truncate_records(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ i, le32_to_cpu(rec.t_start), num_clusters);
status = ocfs2_free_clusters(handle, data_alloc_inode,
data_alloc_bh, start_blk,
@@ -5932,7 +5920,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
osb->truncated_clusters = 0;
bail:
- mlog_exit(status);
return status;
}
@@ -5949,8 +5936,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- mlog_entry_void();
-
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
di = (struct ocfs2_dinode *) tl_bh->b_data;
@@ -5962,8 +5947,9 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
tl = &di->id2.i_dealloc;
num_to_flush = le16_to_cpu(tl->tl_used);
- mlog(0, "Flush %u records from truncate log #%llu\n",
- num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
+ trace_ocfs2_flush_truncate_log(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ num_to_flush);
if (!num_to_flush) {
status = 0;
goto out;
@@ -6009,7 +5995,6 @@ out_mutex:
iput(data_alloc_inode);
out:
- mlog_exit(status);
return status;
}
@@ -6032,15 +6017,11 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
container_of(work, struct ocfs2_super,
osb_truncate_log_wq.work);
- mlog_entry_void();
-
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
mlog_errno(status);
else
ocfs2_init_steal_slots(osb);
-
- mlog_exit(status);
}
#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
@@ -6086,7 +6067,6 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
*tl_inode = inode;
*tl_bh = bh;
bail:
- mlog_exit(status);
return status;
}
@@ -6106,7 +6086,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
*tl_copy = NULL;
- mlog(0, "recover truncate log from slot %d\n", slot_num);
+ trace_ocfs2_begin_truncate_log_recovery(slot_num);
status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
if (status < 0) {
@@ -6123,8 +6103,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
tl = &di->id2.i_dealloc;
if (le16_to_cpu(tl->tl_used)) {
- mlog(0, "We'll have %u logs to recover\n",
- le16_to_cpu(tl->tl_used));
+ trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
*tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
if (!(*tl_copy)) {
@@ -6157,9 +6136,9 @@ bail:
if (status < 0 && (*tl_copy)) {
kfree(*tl_copy);
*tl_copy = NULL;
+ mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -6174,8 +6153,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_truncate_log *tl;
- mlog_entry_void();
-
if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
return -EINVAL;
@@ -6183,8 +6160,9 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
tl = &tl_copy->id2.i_dealloc;
num_recs = le16_to_cpu(tl->tl_used);
- mlog(0, "cleanup %u records from %llu\n", num_recs,
- (unsigned long long)le64_to_cpu(tl_copy->i_blkno));
+ trace_ocfs2_complete_truncate_log_recovery(
+ (unsigned long long)le64_to_cpu(tl_copy->i_blkno),
+ num_recs);
mutex_lock(&tl_inode->i_mutex);
for(i = 0; i < num_recs; i++) {
@@ -6219,7 +6197,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
bail_up:
mutex_unlock(&tl_inode->i_mutex);
- mlog_exit(status);
return status;
}
@@ -6228,8 +6205,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
int status;
struct inode *tl_inode = osb->osb_tl_inode;
- mlog_entry_void();
-
if (tl_inode) {
cancel_delayed_work(&osb->osb_truncate_log_wq);
flush_workqueue(ocfs2_wq);
@@ -6241,8 +6216,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
brelse(osb->osb_tl_bh);
iput(osb->osb_tl_inode);
}
-
- mlog_exit_void();
}
int ocfs2_truncate_log_init(struct ocfs2_super *osb)
@@ -6251,8 +6224,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
struct inode *tl_inode = NULL;
struct buffer_head *tl_bh = NULL;
- mlog_entry_void();
-
status = ocfs2_get_truncate_log_info(osb,
osb->slot_num,
&tl_inode,
@@ -6268,7 +6239,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
osb->osb_tl_bh = tl_bh;
osb->osb_tl_inode = tl_inode;
- mlog_exit(status);
return status;
}
@@ -6350,8 +6320,8 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
else
bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
head->free_bit);
- mlog(0, "Free bit: (bit %u, blkno %llu)\n",
- head->free_bit, (unsigned long long)head->free_blk);
+ trace_ocfs2_free_cached_blocks(
+ (unsigned long long)head->free_blk, head->free_bit);
ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
head->free_bit, bg_blkno, 1);
@@ -6404,8 +6374,7 @@ int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
return ret;
}
- mlog(0, "Insert clusters: (bit %u, blk %llu)\n",
- bit, (unsigned long long)blkno);
+ trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit);
item->free_blk = blkno;
item->free_bit = bit;
@@ -6480,8 +6449,8 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb,
fl = ctxt->c_first_suballocator;
if (fl->f_first) {
- mlog(0, "Free items: (type %u, slot %d)\n",
- fl->f_inode_type, fl->f_slot);
+ trace_ocfs2_run_deallocs(fl->f_inode_type,
+ fl->f_slot);
ret2 = ocfs2_free_cached_blocks(osb,
fl->f_inode_type,
fl->f_slot,
@@ -6558,8 +6527,9 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
goto out;
}
- mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
- type, slot, bit, (unsigned long long)blkno);
+ trace_ocfs2_cache_block_dealloc(type, slot,
+ (unsigned long long)suballoc,
+ (unsigned long long)blkno, bit);
item->free_bg = suballoc;
item->free_blk = blkno;
@@ -7005,8 +6975,6 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct ocfs2_extent_tree et;
struct ocfs2_cached_dealloc_ctxt dealloc;
- mlog_entry_void();
-
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -7041,8 +7009,11 @@ start:
goto bail;
}
- mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
- OCFS2_I(inode)->ip_clusters, path->p_tree_depth);
+ trace_ocfs2_commit_truncate(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ new_highest_cpos,
+ OCFS2_I(inode)->ip_clusters,
+ path->p_tree_depth);
/*
* By now, el will point to the extent list on the bottom most
@@ -7136,7 +7107,6 @@ bail:
ocfs2_free_path(path);
- mlog_exit(status);
return status;
}
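Throughout alloc.c the debug-only mlog(0, ...) calls and the mlog_entry()/mlog_exit() pairs are replaced with dedicated tracepoints from the new ocfs2_trace.h, while mlog_exit() on error paths becomes mlog_errno(). The header itself is not part of this excerpt; as a rough sketch only, assuming the standard TRACE_EVENT() machinery and guessing field names from the call site in ocfs2_insert_extent(), one such declaration could look like:

    /* sketch only: a real trace header also defines TRACE_SYSTEM and the
     * CREATE_TRACE_POINTS boilerplate required by <trace/define_trace.h> */
    #include <linux/tracepoint.h>

    TRACE_EVENT(ocfs2_insert_extent_start,
        TP_PROTO(unsigned long long owner,
                 unsigned int cpos, unsigned int clusters),
        TP_ARGS(owner, cpos, clusters),
        TP_STRUCT__entry(
            __field(unsigned long long, owner)
            __field(unsigned int, cpos)
            __field(unsigned int, clusters)
        ),
        TP_fast_assign(
            __entry->owner = owner;
            __entry->cpos = cpos;
            __entry->clusters = clusters;
        ),
        TP_printk("owner %llu cpos %u clusters %u",
                  __entry->owner, __entry->cpos, __entry->clusters)
    );

Tracepoints compile down to a disabled branch when not in use, which is why the conversion can drop the per-call mask checks that mlog() performed.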
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1fbb0e2..ac97bca 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -29,7 +29,6 @@
#include <linux/mpage.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -45,6 +44,7 @@
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -59,8 +59,9 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
void *kaddr;
- mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
- (unsigned long long)iblock, bh_result, create);
+ trace_ocfs2_symlink_get_block(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)iblock, bh_result, create);
BUG_ON(ocfs2_inode_is_fast_symlink(inode));
@@ -123,7 +124,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
bail:
brelse(bh);
- mlog_exit(err);
return err;
}
@@ -136,8 +136,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
u64 p_blkno, count, past_eof;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
- (unsigned long long)iblock, bh_result, create);
+ trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)iblock, bh_result, create);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
@@ -199,8 +199,9 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
}
past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
- mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
- (unsigned long long)past_eof);
+
+ trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)past_eof);
if (create && (iblock >= past_eof))
set_buffer_new(bh_result);
@@ -208,7 +209,6 @@ bail:
if (err < 0)
err = -EIO;
- mlog_exit(err);
return err;
}
@@ -278,7 +278,8 @@ static int ocfs2_readpage(struct file *file, struct page *page)
loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
int ret, unlock = 1;
- mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
+ trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
+ (page ? page->index : 0));
ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
if (ret != 0) {
@@ -323,7 +324,6 @@ out_inode_unlock:
out:
if (unlock)
unlock_page(page);
- mlog_exit(ret);
return ret;
}
@@ -396,15 +396,11 @@ out_unlock:
*/
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
- int ret;
-
- mlog_entry("(0x%p)\n", page);
-
- ret = block_write_full_page(page, ocfs2_get_block, wbc);
+ trace_ocfs2_writepage(
+ (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
+ page->index);
- mlog_exit(ret);
-
- return ret;
+ return block_write_full_page(page, ocfs2_get_block, wbc);
}
/* Taken from ext3. We don't necessarily need the full blown
@@ -450,7 +446,8 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
int err = 0;
struct inode *inode = mapping->host;
- mlog_entry("(block = %llu)\n", (unsigned long long)block);
+ trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)block);
/* We don't need to lock journal system files, since they aren't
* accessed concurrently from multiple nodes.
@@ -484,8 +481,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
bail:
status = err ? 0 : p_blkno;
- mlog_exit((int)status);
-
return status;
}
@@ -616,9 +611,6 @@ static ssize_t ocfs2_direct_IO(int rw,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
- int ret;
-
- mlog_entry_void();
/*
* Fallback to buffered I/O if we see an inode without
@@ -631,13 +623,10 @@ static ssize_t ocfs2_direct_IO(int rw,
if (i_size_read(inode) <= offset)
return 0;
- ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
- iov, offset, nr_segs,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
-
- mlog_exit(ret);
- return ret;
+ return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+ iov, offset, nr_segs,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
}
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
@@ -1026,6 +1015,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
&cluster_start, &cluster_end);
+ /* treat the write as new if a hole/lseek spanned across
+ * the page boundary.
+ */
+ new = new | ((i_size_read(inode) <= page_offset(page)) &&
+ (page_offset(page) <= user_pos));
+
if (page == wc->w_target_page) {
map_from = user_pos & (PAGE_CACHE_SIZE - 1);
map_to = map_from + user_len;
@@ -1534,9 +1529,9 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = NULL;
- mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
- (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
- oi->ip_dyn_features);
+ trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
+ len, (unsigned long long)pos,
+ oi->ip_dyn_features);
/*
* Handle inodes which already have inline data 1st.
@@ -1739,6 +1734,13 @@ try_again:
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
+ trace_ocfs2_write_begin_nolock(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (long long)i_size_read(inode),
+ le32_to_cpu(di->i_clusters),
+ pos, len, flags, mmap_page,
+ clusters_to_alloc, extents_to_split);
+
/*
* We set w_target_from, w_target_to here so that
* ocfs2_write_end() knows which range in the target page to
@@ -1751,12 +1753,6 @@ try_again:
* ocfs2_lock_allocators(). It greatly over-estimates
* the work to be done.
*/
- mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u,"
- " clusters_to_add = %u, extents_to_split = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
- clusters_to_alloc, extents_to_split);
-
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
wc->w_di_bh);
ret = ocfs2_lock_allocators(inode, &et,
@@ -1938,8 +1934,8 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
kunmap_atomic(kaddr, KM_USER0);
- mlog(0, "Data written to inode at offset %llu. "
- "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
+ trace_ocfs2_write_end_inline(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)pos, *copied,
le16_to_cpu(di->id2.i_data.id_count),
le16_to_cpu(di->i_dyn_features));
@@ -2043,7 +2039,6 @@ const struct address_space_operations ocfs2_aops = {
.write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
- .sync_page = block_sync_page,
.direct_IO = ocfs2_direct_IO,
.invalidatepage = ocfs2_invalidatepage,
.releasepage = ocfs2_releasepage,
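The other functional change in aops.c is the hole check added to ocfs2_prepare_page_for_write(): a page is treated as new when i_size ends at or before the start of the page and the write position lies at or past that page start, i.e. the page sits entirely in a hole left by an earlier lseek() past EOF. A tiny runnable C model of the predicate (the PAGE_SIZE value and function names are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* page_offset(): byte offset of the first byte of page 'index' */
    static unsigned long long page_offset(unsigned long long index)
    {
        return index * PAGE_SIZE;
    }

    /* mirrors: new |= (i_size <= page_offset) && (page_offset <= user_pos) */
    static int write_is_new(unsigned long long i_size,
                            unsigned long long index,
                            unsigned long long user_pos)
    {
        return i_size <= page_offset(index) && page_offset(index) <= user_pos;
    }

    int main(void)
    {
        /* file is 100 bytes; a write lands at byte 9000 (page 2):
         * page 2 starts at 8192 >= i_size, so it sits in a hole */
        printf("page 2 new? %d\n", write_is_new(100, 2, 9000));
        /* a write at byte 50 touches page 0, which holds real data */
        printf("page 0 new? %d\n", write_is_new(100, 0, 50));
        return 0;
    }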
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index eceb456..75cf3ad 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -71,7 +71,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
/*
* Using a named enum representing lock types in terms of #N bit stored in
- * iocb->private, which is going to be used for communication bewteen
+ * iocb->private, which is going to be used for communication between
* ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
*/
enum ocfs2_iocb_lock_bits {
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index f9d5d3f..5d18ad1 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -35,8 +35,8 @@
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
-
#include "buffer_head_io.h"
+#include "ocfs2_trace.h"
/*
* Bits on bh->b_state used by ocfs2.
@@ -55,8 +55,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
{
int ret = 0;
- mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n",
- (unsigned long long)bh->b_blocknr, ci);
+ trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
BUG_ON(buffer_jbd(bh));
@@ -66,6 +65,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
* can get modified during recovery even if read-only. */
if (ocfs2_is_hard_readonly(osb)) {
ret = -EROFS;
+ mlog_errno(ret);
goto out;
}
@@ -91,11 +91,11 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
* uptodate. */
ret = -EIO;
put_bh(bh);
+ mlog_errno(ret);
}
ocfs2_metadata_cache_io_unlock(ci);
out:
- mlog_exit(ret);
return ret;
}
@@ -106,10 +106,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int i;
struct buffer_head *bh;
- if (!nr) {
- mlog(ML_BH_IO, "No buffers will be read!\n");
+ trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
+
+ if (!nr)
goto bail;
- }
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
@@ -123,10 +123,8 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
bh = bhs[i];
if (buffer_jbd(bh)) {
- mlog(ML_BH_IO,
- "trying to sync read a jbd "
- "managed bh (blocknr = %llu), skipping\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_read_blocks_sync_jbd(
+ (unsigned long long)bh->b_blocknr);
continue;
}
@@ -186,8 +184,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bh;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
- mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n",
- ci, (unsigned long long)block, nr, flags);
+ trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
BUG_ON(!ci);
BUG_ON((flags & OCFS2_BH_READAHEAD) &&
@@ -207,7 +204,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
if (nr == 0) {
- mlog(ML_BH_IO, "No buffers will be read!\n");
status = 0;
goto bail;
}
@@ -251,8 +247,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
*/
if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
- mlog(ML_UPTODATE,
- "bh (%llu), owner %llu not uptodate\n",
+ trace_ocfs2_read_blocks_from_disk(
(unsigned long long)bh->b_blocknr,
(unsigned long long)ocfs2_metadata_cache_owner(ci));
/* We're using ignore_cache here to say
@@ -260,11 +255,10 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
ignore_cache = 1;
}
+ trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
+ ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
+
if (buffer_jbd(bh)) {
- if (ignore_cache)
- mlog(ML_BH_IO, "trying to sync read a jbd "
- "managed bh (blocknr = %llu)\n",
- (unsigned long long)bh->b_blocknr);
continue;
}
@@ -272,9 +266,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
if (buffer_dirty(bh)) {
/* This should probably be a BUG, or
* at least return an error. */
- mlog(ML_BH_IO, "asking me to sync read a dirty "
- "buffer! (blocknr = %llu)\n",
- (unsigned long long)bh->b_blocknr);
continue;
}
@@ -367,14 +358,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
ocfs2_metadata_cache_io_unlock(ci);
- mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
- (unsigned long long)block, nr,
- ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
- flags);
+ trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
+ flags, ignore_cache);
bail:
- mlog_exit(status);
return status;
}
@@ -408,13 +396,12 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
int ret = 0;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
- mlog_entry_void();
-
BUG_ON(buffer_jbd(bh));
ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
ret = -EROFS;
+ mlog_errno(ret);
goto out;
}
@@ -434,9 +421,9 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
if (!buffer_uptodate(bh)) {
ret = -EIO;
put_bh(bh);
+ mlog_errno(ret);
}
out:
- mlog_exit(ret);
return ret;
}
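
Note: with mlog_entry()/mlog_exit() gone from these helpers, failures are now reported where they happen via mlog_errno() instead of once at the return point. A userspace-style sketch of the shape (the real mlog_errno() is a masklog print carrying file and line; this stand-in only mimics that):

#include <stdio.h>

#define mlog_errno_sketch(st) \
	fprintf(stderr, "%s:%d: status = %d\n", __FILE__, __LINE__, (st))

static int write_block_sketch(int hard_readonly)
{
	int ret = 0;

	if (hard_readonly) {
		ret = -30;			/* -EROFS */
		mlog_errno_sketch(ret);		/* log at the failure site */
		goto out;
	}
	/* ... submit and wait for the write ... */
out:
	return ret;
}
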
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index b108e86..6437202 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
static void o2hb_wait_on_io(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc)
{
- struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
-
- blk_run_address_space(mapping);
o2hb_bio_wait_dec(wc, 1);
-
wait_for_completion(&wc->wc_io_complete);
}
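
Note: dropping blk_run_address_space() here appears to match the block-layer plugging rework merged alongside this series (the same change that removed .sync_page from ocfs2_aops above): submitted bios are unplugged automatically, so waiting on the completion is enough. A minimal sketch of the remaining wait, assuming the bio end_io handler calls complete():

#include <linux/completion.h>

struct io_waiter {			/* stand-in for o2hb_bio_wait_ctxt */
	struct completion done;
};

static void wait_io_sketch(struct io_waiter *w)
{
	wait_for_completion(&w->done);	/* end_io side calls complete() */
}
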
@@ -1658,8 +1654,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
- mlog_entry_void();
-
ret = o2hb_read_slots(reg, reg->hr_blocks);
if (ret) {
mlog_errno(ret);
@@ -1681,7 +1675,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
}
out:
- mlog_exit(ret);
return ret;
}
@@ -2282,7 +2275,7 @@ void o2hb_free_hb_set(struct config_group *group)
kfree(hs);
}
-/* hb callback registration and issueing */
+/* hb callback registration and issuing */
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 6c61771..07ac24f 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -30,7 +30,7 @@
struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
-struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK);
+struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);
static ssize_t mlog_mask_show(u64 mask, char *buf)
@@ -80,8 +80,6 @@ struct mlog_attribute {
}
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
- define_mask(ENTRY),
- define_mask(EXIT),
define_mask(TCP),
define_mask(MSG),
define_mask(SOCKET),
@@ -93,27 +91,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(DLM_THREAD),
define_mask(DLM_MASTER),
define_mask(DLM_RECOVERY),
- define_mask(AIO),
- define_mask(JOURNAL),
- define_mask(DISK_ALLOC),
- define_mask(SUPER),
- define_mask(FILE_IO),
- define_mask(EXTENT_MAP),
define_mask(DLM_GLUE),
- define_mask(BH_IO),
- define_mask(UPTODATE),
- define_mask(NAMEI),
- define_mask(INODE),
define_mask(VOTE),
- define_mask(DCACHE),
define_mask(CONN),
define_mask(QUORUM),
- define_mask(EXPORT),
- define_mask(XATTR),
- define_mask(QUOTA),
- define_mask(REFCOUNT),
define_mask(BASTS),
- define_mask(RESERVATIONS),
define_mask(CLUSTER),
define_mask(ERROR),
define_mask(NOTICE),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 34d6544..baa2b9e 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -82,41 +82,23 @@
/* bits that are frequently given and infrequently matched in the low word */
/* NOTE: If you add a flag, you need to also update masklog.c! */
-#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
-#define ML_EXIT 0x0000000000000002ULL /* func call exit */
-#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */
-#define ML_MSG 0x0000000000000008ULL /* net network messages */
-#define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */
-#define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */
-#define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */
-#define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */
-#define ML_DLM 0x0000000000000100ULL /* dlm general debugging */
-#define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */
-#define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */
-#define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */
-#define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm master functions */
-#define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */
-#define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */
-#define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */
-#define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */
-#define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */
-#define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */
-#define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */
-#define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */
-#define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */
-#define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */
-#define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */
-#define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */
-#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */
-#define ML_CONN 0x0000000004000000ULL /* net connection management */
-#define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */
-#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
-#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
-#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
-#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
-#define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */
-#define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */
-#define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */
+#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000002ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */
+#define ML_DLM_GLUE 0x0000000000000800ULL /* ocfs2 dlm glue layer */
+#define ML_VOTE 0x0000000000001000ULL /* ocfs2 node messaging */
+#define ML_CONN 0x0000000000002000ULL /* net connection management */
+#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
+#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
+#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
@@ -124,7 +106,6 @@
#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
-#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
#ifndef MLOG_MASK_PREFIX
#define MLOG_MASK_PREFIX 0
#endif
@@ -222,58 +203,6 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
} while (0)
-#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
-#define mlog_entry(fmt, args...) do { \
- mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
-} while (0)
-
-#define mlog_entry_void() do { \
- mlog(ML_ENTRY, "ENTRY:\n"); \
-} while (0)
-
-/*
- * We disable this for sparse.
- */
-#if !defined(__CHECKER__)
-#define mlog_exit(st) do { \
- if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
- mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed long)) \
- mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
- || __builtin_types_compatible_p(typeof(st), unsigned short) \
- || __builtin_types_compatible_p(typeof(st), unsigned char)) \
- mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed int) \
- || __builtin_types_compatible_p(typeof(st), signed short) \
- || __builtin_types_compatible_p(typeof(st), signed char)) \
- mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), long long)) \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
- else \
- mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
-} while (0)
-#else
-#define mlog_exit(st) do { \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
-} while (0)
-#endif
-
-#define mlog_exit_ptr(ptr) do { \
- mlog(ML_EXIT, "EXIT: %p\n", ptr); \
-} while (0)
-
-#define mlog_exit_void() do { \
- mlog(ML_EXIT, "EXIT\n"); \
-} while (0)
-#else
-#define mlog_entry(...) do { } while (0)
-#define mlog_entry_void(...) do { } while (0)
-#define mlog_exit(...) do { } while (0)
-#define mlog_exit_ptr(...) do { } while (0)
-#define mlog_exit_void(...) do { } while (0)
-#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */
-
#define mlog_bug_on_msg(cond, fmt, args...) do { \
if (cond) { \
mlog(ML_ERROR, "bug expression: " #cond "\n"); \
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index a873667..8f9cea1 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -89,7 +89,7 @@ static void o2quo_fence_self(void)
};
}
-/* Indicate that a timeout occured on a hearbeat region write. The
+/* Indicate that a timeout occurred on a heartbeat region write. The
* other nodes in the cluster may consider us dead at that time so we
* want to "fence" ourselves so that we don't scribble on the disk
* after they think they've recovered us. This can't solve all
@@ -261,7 +261,7 @@ void o2quo_hb_still_up(u8 node)
spin_unlock(&qs->qs_lock);
}
-/* This is analagous to hb_up. as a node's connection comes up we delay the
+/* This is analogous to hb_up. as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating. the hold will be dropped in
 * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
 * it's already heartbeating we might be dropping a hold that conn_up got.
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 3b11cb1..db5ee4b 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -210,10 +210,6 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
sc->sc_tv_func_stop = ktime_get();
}
-static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
-{
- return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
-}
#else /* CONFIG_DEBUG_FS */
# define o2net_init_nst(a, b, c, d, e)
# define o2net_set_nst_sock_time(a)
@@ -227,10 +223,14 @@ static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
# define o2net_set_advance_stop_time(a)
# define o2net_set_func_start_time(a)
# define o2net_set_func_stop_time(a)
-# define o2net_get_func_run_time(a) (ktime_t)0
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_OCFS2_FS_STATS
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+ return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+
static void o2net_update_send_stats(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
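
Note: o2net_get_func_run_time() moves under CONFIG_OCFS2_FS_STATS because o2net_update_send_stats() is its only remaining caller; leaving it under CONFIG_DEBUG_FS alone would presumably trip a defined-but-unused warning when stats are off. The helper itself is the stock monotonic-delta pattern:

/* Minimal sketch of the ktime delta pattern used above. */
#include <linux/ktime.h>

static s64 elapsed_ns(ktime_t start, ktime_t stop)
{
	return ktime_to_ns(ktime_sub(stop, start));
}
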
@@ -565,7 +565,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
* the work queue actually being up. */
if (!valid && o2net_wq) {
unsigned long delay;
- /* delay if we're withing a RECONNECT_DELAY of the
+ /* delay if we're within a RECONNECT_DELAY of the
* last attempt */
delay = (nn->nn_last_connect_attempt +
msecs_to_jiffies(o2net_reconnect_delay()))
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 7eb9040..e5ba348 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/namei.h>
-#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "file.h"
#include "inode.h"
#include "super.h"
+#include "ocfs2_trace.h"
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
@@ -62,8 +62,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
inode = dentry->d_inode;
osb = OCFS2_SB(dentry->d_sb);
- mlog_entry("(0x%p, '%.*s')\n", dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
+ dentry->d_name.name);
/* For a negative dentry -
* check the generation number of the parent and compare with the
@@ -73,9 +73,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
unsigned long gen = (unsigned long) dentry->d_fsdata;
unsigned long pgen =
OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
- mlog(0, "negative dentry: %.*s parent gen: %lu "
- "dentry gen: %lu\n",
- dentry->d_name.len, dentry->d_name.name, pgen, gen);
+
+ trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
+ dentry->d_name.name,
+ pgen, gen);
if (gen != pgen)
goto bail;
goto valid;
@@ -90,8 +91,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
/* did we or someone else delete this inode? */
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
- mlog(0, "inode (%llu) deleted, returning false\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_dentry_revalidate_delete(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
spin_unlock(&OCFS2_I(inode)->ip_lock);
@@ -101,10 +102,9 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
* inode nlink hits zero, it never goes back.
*/
if (inode->i_nlink == 0) {
- mlog(0, "Inode %llu orphaned, returning false "
- "dir = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- S_ISDIR(inode->i_mode));
+ trace_ocfs2_dentry_revalidate_orphaned(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ S_ISDIR(inode->i_mode));
goto bail;
}
@@ -113,9 +113,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
* redo it.
*/
if (!dentry->d_fsdata) {
- mlog(0, "Inode %llu doesn't have dentry lock, "
- "returning false\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_dentry_revalidate_nofsdata(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
@@ -123,8 +122,7 @@ valid:
ret = 1;
bail:
- mlog_exit(ret);
-
+ trace_ocfs2_dentry_revalidate_ret(ret);
return ret;
}
@@ -181,8 +179,8 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
- mlog(0, "dentry found: %.*s\n",
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_find_local_alias(dentry->d_name.len,
+ dentry->d_name.name);
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
@@ -240,9 +238,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
struct dentry *alias;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
- mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
- dentry->d_name.len, dentry->d_name.name,
- (unsigned long long)parent_blkno, dl);
+ trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)parent_blkno, dl);
/*
* Negative dentry. We ignore these for now.
@@ -292,7 +289,9 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
(unsigned long long)parent_blkno,
(unsigned long long)dl->dl_parent_blkno);
- mlog(0, "Found: %s\n", dl->dl_lockres.l_name);
+ trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
+ (unsigned long long)parent_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto out_attach;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f97b6f1..9fe5b8fd 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -43,7 +43,6 @@
#include <linux/quotaops.h>
#include <linux/sort.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -61,6 +60,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -322,21 +322,23 @@ static int ocfs2_check_dir_entry(struct inode * dir,
const char *error_msg = NULL;
const int rlen = le16_to_cpu(de->rec_len);
- if (rlen < OCFS2_DIR_REC_LEN(1))
+ if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
+ else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
- else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ else if (unlikely(
+ ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
- if (error_msg != NULL)
+ if (unlikely(error_msg != NULL))
mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
"offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
de->name_len);
+
return error_msg == NULL ? 1 : 0;
}
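
Note: the unlikely() annotations tell the compiler that validation failures are the cold path, so the fall-through for a well-formed dirent stays in the hot layout. unlikely() is the usual kernel wrapper around __builtin_expect; a self-contained sketch of the same shape (min_len standing in for OCFS2_DIR_REC_LEN(1)):

#include <stdbool.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static bool rec_len_ok(int rlen, int min_len)
{
	if (unlikely(rlen < min_len))
		return false;		/* rare: pushed off the hot path */
	return rlen % 4 == 0;
}
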
@@ -367,8 +369,6 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
int de_len;
int ret = 0;
- mlog_entry_void();
-
de_buf = first_de;
dlimit = de_buf + bytes;
@@ -402,7 +402,7 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
}
bail:
- mlog_exit(ret);
+ trace_ocfs2_search_dirblock(ret);
return ret;
}
@@ -447,8 +447,7 @@ static int ocfs2_validate_dir_block(struct super_block *sb,
* We don't validate dirents here, that's handled
* in-place when the code walks them.
*/
- mlog(0, "Validating dirblock %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -706,8 +705,6 @@ static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
int num = 0;
int nblocks, i, err;
- mlog_entry_void();
-
sb = dir->i_sb;
nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
@@ -788,7 +785,7 @@ cleanup_and_exit:
for (; ra_ptr < ra_max; ra_ptr++)
brelse(bh_use[ra_ptr]);
- mlog_exit_ptr(ret);
+ trace_ocfs2_find_entry_el(ret);
return ret;
}
@@ -950,11 +947,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
goto out;
}
- mlog(0, "Dir %llu: name: \"%.*s\", lookup of hash: %u.0x%x "
- "returns: %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- namelen, name, hinfo->major_hash, hinfo->minor_hash,
- (unsigned long long)phys);
+ trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ namelen, name, hinfo->major_hash,
+ hinfo->minor_hash, (unsigned long long)phys);
ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
if (ret) {
@@ -964,9 +959,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
- mlog(0, "leaf info: num_used: %d, count: %d\n",
- le16_to_cpu(dx_leaf->dl_list.de_num_used),
- le16_to_cpu(dx_leaf->dl_list.de_count));
+ trace_ocfs2_dx_dir_search_leaf_info(
+ le16_to_cpu(dx_leaf->dl_list.de_num_used),
+ le16_to_cpu(dx_leaf->dl_list.de_count));
entry_list = &dx_leaf->dl_list;
@@ -1166,8 +1161,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
int i, status = -ENOENT;
ocfs2_journal_access_func access = ocfs2_journal_access_db;
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
-
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
access = ocfs2_journal_access_di;
@@ -1202,7 +1195,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
}
bail:
- mlog_exit(status);
return status;
}
@@ -1348,8 +1340,8 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
}
}
- mlog(0, "Dir %llu: delete entry at index: %d\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, index);
+ trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ index);
ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
@@ -1632,8 +1624,6 @@ int __ocfs2_add_entry(handle_t *handle,
struct buffer_head *insert_bh = lookup->dl_leaf_bh;
char *data_start = insert_bh->b_data;
- mlog_entry_void();
-
if (!namelen)
return -EINVAL;
@@ -1765,8 +1755,9 @@ int __ocfs2_add_entry(handle_t *handle,
* from ever getting here. */
retval = -ENOSPC;
bail:
+ if (retval)
+ mlog_errno(retval);
- mlog_exit(retval);
return retval;
}
@@ -2028,8 +2019,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct inode *inode = filp->f_path.dentry->d_inode;
int lock_level = 0;
- mlog_entry("dirino=%llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
if (lock_level && error >= 0) {
@@ -2051,9 +2041,10 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
dirent, filldir, NULL);
ocfs2_inode_unlock(inode, lock_level);
+ if (error)
+ mlog_errno(error);
bail_nolock:
- mlog_exit(error);
return error;
}
@@ -2069,8 +2060,8 @@ int ocfs2_find_files_on_disk(const char *name,
{
int status = -ENOENT;
- mlog(0, "name=%.*s, blkno=%p, inode=%llu\n", namelen, name, blkno,
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_find_files_on_disk(namelen, name, blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_find_entry(name, namelen, inode, lookup);
if (status)
@@ -2114,8 +2105,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
int ret;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry("dir %llu, name '%.*s'\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
+ trace_ocfs2_check_dir_for_entry(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
ret = -EEXIST;
if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0)
@@ -2125,7 +2116,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
bail:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -2324,8 +2316,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
struct buffer_head *new_bh = NULL;
struct ocfs2_dir_entry *de;
- mlog_entry_void();
-
if (ocfs2_new_dir_wants_trailer(inode))
size = ocfs2_dir_trailer_blk_off(parent->i_sb);
@@ -2380,7 +2370,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
bail:
brelse(new_bh);
- mlog_exit(status);
return status;
}
@@ -2409,9 +2398,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
goto out;
}
- mlog(0, "Dir %llu, attach new index block: %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)dr_blkno);
+ trace_ocfs2_dx_dir_attach_index(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)dr_blkno);
dx_root_bh = sb_getblk(osb->sb, dr_blkno);
if (dx_root_bh == NULL) {
@@ -2511,11 +2500,10 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
dx_leaf->dl_list.de_count =
cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
- mlog(0,
- "Dir %llu, format dx_leaf: %llu, entry count: %u\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)bh->b_blocknr,
- le16_to_cpu(dx_leaf->dl_list.de_count));
+ trace_ocfs2_dx_dir_format_cluster(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)bh->b_blocknr,
+ le16_to_cpu(dx_leaf->dl_list.de_count));
ocfs2_journal_dirty(handle, bh);
}
@@ -2759,12 +2747,11 @@ static void ocfs2_dx_dir_index_root_block(struct inode *dir,
ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
- mlog(0,
- "dir: %llu, major: 0x%x minor: 0x%x, index: %u, name: %.*s\n",
- (unsigned long long)dir->i_ino, hinfo.major_hash,
- hinfo.minor_hash,
- le16_to_cpu(dx_root->dr_entries.de_num_used),
- de->name_len, de->name);
+ trace_ocfs2_dx_dir_index_root_block(
+ (unsigned long long)dir->i_ino,
+ hinfo.major_hash, hinfo.minor_hash,
+ de->name_len, de->name,
+ le16_to_cpu(dx_root->dr_entries.de_num_used));
ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
dirent_blk);
@@ -3235,7 +3222,6 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
bail:
if (did_quota && status < 0)
dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
- mlog_exit(status);
return status;
}
@@ -3270,8 +3256,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct ocfs2_extent_tree et;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
- mlog_entry_void();
-
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
/*
* This would be a code error as an inline directory should
@@ -3320,8 +3304,8 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
dir_i_size = i_size_read(dir);
- mlog(0, "extending dir %llu (i_size = %lld)\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
+ trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ dir_i_size);
/* dir->i_size is always block aligned. */
spin_lock(&OCFS2_I(dir)->ip_lock);
@@ -3436,7 +3420,6 @@ bail:
brelse(new_bh);
- mlog_exit(status);
return status;
}
@@ -3583,8 +3566,9 @@ next:
status = 0;
bail:
brelse(bh);
+ if (status)
+ mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -3815,9 +3799,9 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
- mlog(0, "DX Dir: %llu, rebalance leaf leaf_blkno: %llu insert: %u\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)leaf_blkno, insert_hash);
+ trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)leaf_blkno,
+ insert_hash);
ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
@@ -3897,8 +3881,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
goto out_commit;
}
- mlog(0, "Split leaf (%u) at %u, insert major hash is %u\n",
- leaf_cpos, split_hash, insert_hash);
+ trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
/*
* We have to carefully order operations here. There are items
@@ -4355,8 +4338,8 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
unsigned int blocks_wanted = 1;
struct buffer_head *bh = NULL;
- mlog(0, "getting ready to insert namelen %d into dir %llu\n",
- namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ trace_ocfs2_prepare_dir_for_insert(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
if (!namelen) {
ret = -EINVAL;
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index dcebf0d..c8a044e 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -Ifs/ocfs2
+ccflags-y := -Ifs/ocfs2
obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 9f30491..29a886d 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -128,8 +128,8 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
- mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
- lock->ml.type, lock->ml.convert_type, type);
+ mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
+ lock->ml.type, lock->ml.convert_type, type);
spin_lock(&lock->spinlock);
@@ -353,7 +353,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
struct kvec vec[2];
size_t veclen = 1;
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
memset(&convert, 0, sizeof(struct dlm_convert_lock));
convert.node_idx = dlm->node_num;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7e38a07..7540a49 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -188,7 +188,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
struct hlist_head *bucket;
struct hlist_node *list;
- mlog_entry("%.*s\n", len, name);
+ mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -222,7 +222,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
{
struct dlm_lock_resource *res = NULL;
- mlog_entry("%.*s\n", len, name);
+ mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -531,7 +531,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
unsigned int node;
struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
- mlog_entry("%p %u %p", msg, len, data);
+ mlog(0, "%p %u %p", msg, len, data);
if (!dlm_grab(dlm))
return 0;
@@ -926,9 +926,10 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
}
static int dlm_match_regions(struct dlm_ctxt *dlm,
- struct dlm_query_region *qr)
+ struct dlm_query_region *qr,
+ char *local, int locallen)
{
- char *local = NULL, *remote = qr->qr_regions;
+ char *remote = qr->qr_regions;
char *l, *r;
int localnr, i, j, foundit;
int status = 0;
@@ -957,13 +958,8 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
r += O2HB_MAX_REGION_NAME_LEN;
}
- local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
- if (!local) {
- status = -ENOMEM;
- goto bail;
- }
-
- localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS);
+ localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
+ localnr = o2hb_get_all_regions(local, (u8)localnr);
/* compare local regions with remote */
l = local;
@@ -1012,8 +1008,6 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
}
bail:
- kfree(local);
-
return status;
}
@@ -1075,6 +1069,7 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
{
struct dlm_query_region *qr;
struct dlm_ctxt *dlm = NULL;
+ char *local = NULL;
int status = 0;
int locked = 0;
@@ -1083,6 +1078,13 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
qr->qr_domain);
+ /* buffer used in dlm_match_regions() */
+ local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
+ if (!local) {
+ status = -ENOMEM;
+ goto bail;
+ }
+
status = -EINVAL;
spin_lock(&dlm_domain_lock);
@@ -1112,13 +1114,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
goto bail;
}
- status = dlm_match_regions(dlm, qr);
+ status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
bail:
if (locked)
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
+ kfree(local);
+
return status;
}
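
Note: the refactor hoists the scratch buffer out of dlm_match_regions(), which runs with dlm_domain_lock and dlm->spinlock held (hence the old GFP_ATOMIC), up into the handler where GFP_KERNEL may sleep safely; the buffer and its length are then passed down, which is why the function grew its local/locallen parameters. The general shape, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct ctx {				/* illustrative stand-in for dlm_ctxt */
	spinlock_t lock;
};

static int do_match(struct ctx *c, char *buf, size_t len)
{
	return 0;			/* stub: the spinlocked work goes here */
}

static int handler_sketch(struct ctx *c, size_t len)
{
	int ret;
	char *buf = kmalloc(len, GFP_KERNEL);	/* may sleep: no locks held */

	if (!buf)
		return -ENOMEM;

	spin_lock(&c->lock);
	ret = do_match(c, buf, len);		/* atomic: no allocation here */
	spin_unlock(&c->lock);

	kfree(buf);
	return ret;
}
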
@@ -1553,7 +1557,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
struct domain_join_ctxt *ctxt;
enum dlm_query_join_response_code response = JOIN_DISALLOW;
- mlog_entry("%p", dlm);
+ mlog(0, "%p", dlm);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 7009292..8d39e0fd6 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -128,7 +128,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
int call_ast = 0, kick_thread = 0;
enum dlm_status status = DLM_NORMAL;
- mlog_entry("type=%d\n", lock->ml.type);
+ mlog(0, "type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
/* if called from dlm_create_lock_handler, need to
@@ -227,8 +227,8 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
enum dlm_status status = DLM_DENIED;
int lockres_changed = 1;
- mlog_entry("type=%d\n", lock->ml.type);
- mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
+ mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
+ lock->ml.type, res->lockname.len,
res->lockname.name, flags);
spin_lock(&res->spinlock);
@@ -308,8 +308,6 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
int tmpret, status = 0;
enum dlm_status ret;
- mlog_entry_void();
-
memset(&create, 0, sizeof(create));
create.node_idx = dlm->node_num;
create.requested_type = lock->ml.type;
@@ -477,8 +475,6 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
BUG_ON(!dlm);
- mlog_entry_void();
-
if (!dlm_grab(dlm))
return DLM_REJECTED;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 59f0f6b..fede57e 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -426,8 +426,6 @@ static void dlm_mle_release(struct kref *kref)
struct dlm_master_list_entry *mle;
struct dlm_ctxt *dlm;
- mlog_entry_void();
-
mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
dlm = mle->dlm;
@@ -810,7 +808,7 @@ lookup:
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
mle = NULL;
- /* this is lame, but we cant wait on either
+ /* this is lame, but we can't wait on either
* the mle or lockres waitqueue here */
if (mig)
msleep(100);
@@ -845,7 +843,7 @@ lookup:
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
- /* since this lockres is new it doesnt not require the spinlock */
+ /* since this lockres is new it doesn't require the spinlock */
dlm_lockres_grab_inflight_ref_new(dlm, res);
/* if this node does not become the master make sure to drop
@@ -3120,8 +3118,6 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
*oldmle = NULL;
- mlog_entry_void();
-
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
@@ -3261,7 +3257,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
struct hlist_node *list;
unsigned int i;
- mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
+ mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
assert_spin_locked(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index aaaffbc..f1beb6f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -727,7 +727,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
- mlog_exit(status);
return status;
}
@@ -1496,9 +1495,9 @@ leave:
kfree(buf);
if (item)
kfree(item);
+ mlog_errno(ret);
}
- mlog_exit(ret);
return ret;
}
@@ -1567,7 +1566,6 @@ leave:
dlm_lockres_put(res);
}
kfree(data);
- mlog_exit(ret);
}
@@ -1986,7 +1984,6 @@ leave:
dlm_lock_put(newlock);
}
- mlog_exit(ret);
return ret;
}
@@ -2083,8 +2080,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
struct hlist_head *bucket;
struct dlm_lock_resource *res, *next;
- mlog_entry_void();
-
assert_spin_locked(&dlm->spinlock);
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
@@ -2607,8 +2602,6 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
int nodenum;
int status;
- mlog_entry("%u\n", dead_node);
-
mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
spin_lock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 817287c..850aa7e 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -317,7 +317,7 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
struct kvec vec[2];
size_t veclen = 1;
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
if (owner == dlm->node_num) {
/* ended up trying to contact ourself. this means
@@ -588,8 +588,6 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
struct dlm_lock *lock = NULL;
int call_ast, is_master;
- mlog_entry_void();
-
if (!lksb) {
dlm_error(DLM_BADARGS);
return DLM_BADARGS;
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
index df69b48..f14be89 100644
--- a/fs/ocfs2/dlmfs/Makefile
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -Ifs/ocfs2
+ccflags-y := -Ifs/ocfs2
obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e8d94d7..7642d7c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -64,7 +64,7 @@ struct ocfs2_mask_waiter {
unsigned long mw_mask;
unsigned long mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long mw_lock_start;
+ ktime_t mw_lock_start;
#endif
};
@@ -397,8 +397,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
{
int len;
- mlog_entry_void();
-
BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
@@ -408,8 +406,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
mlog(0, "built lock resource with name: %s\n", name);
-
- mlog_exit_void();
}
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
@@ -435,44 +431,41 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
- res->l_lock_num_prmode = 0;
- res->l_lock_num_prmode_failed = 0;
- res->l_lock_total_prmode = 0;
- res->l_lock_max_prmode = 0;
- res->l_lock_num_exmode = 0;
- res->l_lock_num_exmode_failed = 0;
- res->l_lock_total_exmode = 0;
- res->l_lock_max_exmode = 0;
res->l_lock_refresh = 0;
+ memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
+ memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
struct ocfs2_mask_waiter *mw, int ret)
{
- unsigned long long *num, *sum;
- unsigned int *max, *failed;
- struct timespec ts = current_kernel_time();
- unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
-
- if (level == LKM_PRMODE) {
- num = &res->l_lock_num_prmode;
- sum = &res->l_lock_total_prmode;
- max = &res->l_lock_max_prmode;
- failed = &res->l_lock_num_prmode_failed;
- } else if (level == LKM_EXMODE) {
- num = &res->l_lock_num_exmode;
- sum = &res->l_lock_total_exmode;
- max = &res->l_lock_max_exmode;
- failed = &res->l_lock_num_exmode_failed;
- } else
+ u32 usec;
+ ktime_t kt;
+ struct ocfs2_lock_stats *stats;
+
+ if (level == LKM_PRMODE)
+ stats = &res->l_lock_prmode;
+ else if (level == LKM_EXMODE)
+ stats = &res->l_lock_exmode;
+ else
return;
- (*num)++;
- (*sum) += time;
- if (time > *max)
- *max = time;
+ kt = ktime_sub(ktime_get(), mw->mw_lock_start);
+ usec = ktime_to_us(kt);
+
+ stats->ls_gets++;
+ stats->ls_total += ktime_to_ns(kt);
+ /* overflow */
+ if (unlikely(stats->ls_gets == 0)) {
+ stats->ls_gets++;
+ stats->ls_total = ktime_to_ns(kt);
+ }
+
+ if (stats->ls_max < usec)
+ stats->ls_max = usec;
+
if (ret)
- (*failed)++;
+ stats->ls_fail++;
}
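
Note: the reworked stats take deltas from the monotonic ktime_get() rather than current_kernel_time() (wall time, which can step), keep the running total in nanoseconds with the maximum in microseconds, and restart the accumulators when the 32-bit get counter wraps so the derived mean stays meaningful. A plain-C sketch of the same update:

struct lock_stats_sketch {
	unsigned int       ls_gets;
	unsigned long long ls_total;	/* nanoseconds */
	unsigned int       ls_max;	/* microseconds */
};

static void record_wait(struct lock_stats_sketch *s, unsigned long long ns)
{
	unsigned int us = ns / 1000;

	s->ls_gets++;
	s->ls_total += ns;
	if (s->ls_gets == 0) {		/* counter wrapped: restart */
		s->ls_gets = 1;
		s->ls_total = ns;
	}
	if (s->ls_max < us)
		s->ls_max = us;
}
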
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -482,8 +475,7 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
- struct timespec ts = current_kernel_time();
- mw->mw_lock_start = timespec_to_ns(&ts);
+ mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
@@ -729,8 +721,6 @@ void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
- mlog_entry_void();
-
if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
return;
@@ -756,14 +746,11 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
memset(&res->l_lksb, 0, sizeof(res->l_lksb));
res->l_flags = 0UL;
- mlog_exit_void();
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
- mlog_entry_void();
-
BUG_ON(!lockres);
switch(level) {
@@ -776,15 +763,11 @@ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
default:
BUG();
}
-
- mlog_exit_void();
}
static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
int level)
{
- mlog_entry_void();
-
BUG_ON(!lockres);
switch(level) {
@@ -799,7 +782,6 @@ static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
default:
BUG();
}
- mlog_exit_void();
}
/* WARNING: This function lives in a world where the only three lock
@@ -846,8 +828,6 @@ static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
@@ -860,14 +840,10 @@ static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
@@ -889,14 +865,10 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
@@ -908,15 +880,12 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc
lockres->l_level = lockres->l_requested;
lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
int level)
{
int needs_downconvert = 0;
- mlog_entry_void();
assert_spin_locked(&lockres->l_lock);
@@ -938,8 +907,7 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
- mlog_exit(needs_downconvert);
+ mlog(0, "needs_downconvert = %d\n", needs_downconvert);
return needs_downconvert;
}
@@ -1151,8 +1119,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
unsigned long flags;
- mlog_entry_void();
-
mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
lockres->l_name, lockres->l_unlock_action);
@@ -1162,7 +1128,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
"unlock_action %d\n", error, lockres->l_name,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
- mlog_exit_void();
return;
}
@@ -1186,8 +1151,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- mlog_exit_void();
}
/*
@@ -1233,7 +1196,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
@@ -1244,7 +1206,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
- mlog_exit_void();
}
/* Note: If we detect another process working on the lock (i.e.,
@@ -1260,8 +1221,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
unsigned long flags;
unsigned int gen;
- mlog_entry_void();
-
mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
dlm_flags);
@@ -1293,7 +1252,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
bail:
- mlog_exit(ret);
return ret;
}
@@ -1416,8 +1374,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
unsigned int gen;
int noqueue_attempted = 0;
- mlog_entry_void();
-
ocfs2_init_mask_waiter(&mw);
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
@@ -1583,7 +1539,6 @@ out:
caller_ip);
}
#endif
- mlog_exit(ret);
return ret;
}
@@ -1605,7 +1560,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
ocfs2_downconvert_on_unlock(osb, lockres);
@@ -1614,7 +1568,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
if (lockres->l_lockdep_map.key != NULL)
rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
#endif
- mlog_exit_void();
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
@@ -1648,8 +1601,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
BUG_ON(!inode);
BUG_ON(!ocfs2_inode_is_new(inode));
- mlog_entry_void();
-
mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
/* NOTE: That we don't increment any of the holder counts, nor
@@ -1683,7 +1634,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
}
bail:
- mlog_exit(ret);
return ret;
}
@@ -1695,16 +1645,12 @@ int ocfs2_rw_lock(struct inode *inode, int write)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
- if (ocfs2_mount_local(osb)) {
- mlog_exit(0);
+ if (ocfs2_mount_local(osb))
return 0;
- }
lockres = &OCFS2_I(inode)->ip_rw_lockres;
@@ -1715,7 +1661,6 @@ int ocfs2_rw_lock(struct inode *inode, int write)
if (status < 0)
mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -1725,16 +1670,12 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
-
- mlog_exit_void();
}
/*
@@ -1748,8 +1689,6 @@ int ocfs2_open_lock(struct inode *inode)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1764,7 +1703,6 @@ int ocfs2_open_lock(struct inode *inode)
mlog_errno(status);
out:
- mlog_exit(status);
return status;
}
@@ -1776,8 +1714,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
@@ -1799,7 +1735,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
level, DLM_LKF_NOQUEUE, 0);
out:
- mlog_exit(status);
return status;
}
@@ -1811,8 +1746,6 @@ void ocfs2_open_unlock(struct inode *inode)
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1827,7 +1760,7 @@ void ocfs2_open_unlock(struct inode *inode)
DLM_LOCK_EX);
out:
- mlog_exit_void();
+ return;
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
@@ -2043,8 +1976,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
{
int kick = 0;
- mlog_entry_void();
-
/* If we know that another node is waiting on our lock, kick
* the downconvert thread * pre-emptively when we reach a release
* condition. */
@@ -2065,8 +1996,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
if (kick)
ocfs2_wake_downconvert_thread(osb);
-
- mlog_exit_void();
}
#define OCFS2_SEC_BITS 34
@@ -2095,8 +2024,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- mlog_entry_void();
-
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
/*
@@ -2128,8 +2055,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
out:
mlog_meta_lvb(0, lockres);
-
- mlog_exit_void();
}
static void ocfs2_unpack_timespec(struct timespec *spec,
@@ -2145,8 +2070,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- mlog_entry_void();
-
mlog_meta_lvb(0, lockres);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2177,8 +2100,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
ocfs2_unpack_timespec(&inode->i_ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
spin_unlock(&oi->ip_lock);
-
- mlog_exit_void();
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
@@ -2205,8 +2126,6 @@ static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
unsigned long flags;
int status = 0;
- mlog_entry_void();
-
refresh_check:
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
@@ -2227,7 +2146,7 @@ refresh_check:
status = 1;
bail:
- mlog_exit(status);
+ mlog(0, "status %d\n", status);
return status;
}
@@ -2237,7 +2156,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
int status)
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
@@ -2246,8 +2164,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
-
- mlog_exit_void();
}
/* may or may not return a bh if it went to disk. */
@@ -2260,8 +2176,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
struct ocfs2_dinode *fe;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
if (ocfs2_mount_local(osb))
goto bail;
@@ -2330,7 +2244,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
bail_refresh:
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
- mlog_exit(status);
return status;
}
@@ -2374,8 +2287,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu, take %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
@@ -2467,7 +2378,6 @@ bail:
if (local_bh)
brelse(local_bh);
- mlog_exit(status);
return status;
}
@@ -2517,7 +2427,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
{
int ret;
- mlog_entry_void();
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret < 0) {
mlog_errno(ret);
@@ -2545,7 +2454,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
} else
*level = 0;
- mlog_exit(ret);
return ret;
}
@@ -2556,8 +2464,6 @@ void ocfs2_inode_unlock(struct inode *inode,
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
@@ -2565,8 +2471,6 @@ void ocfs2_inode_unlock(struct inode *inode,
if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
-
- mlog_exit_void();
}
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
@@ -2617,8 +2521,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
@@ -2650,7 +2552,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
ocfs2_track_lock_refresh(lockres);
}
bail:
- mlog_exit(status);
return status;
}
@@ -2869,8 +2770,15 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
return iter;
}
-/* So that debugfs.ocfs2 can determine which format is being used */
-#define OCFS2_DLM_DEBUG_STR_VERSION 2
+/*
+ * Version is used by debugfs.ocfs2 to determine the format being used
+ *
+ * New in version 2
+ * - Lock stats printed
+ * New in version 3
+ * - Max time in lock stats is in usecs (instead of nsecs)
+ */
+#define OCFS2_DLM_DEBUG_STR_VERSION 3
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
@@ -2912,18 +2820,18 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
seq_printf(m, "0x%x\t", lvb[i]);
#ifdef CONFIG_OCFS2_FS_STATS
-# define lock_num_prmode(_l) (_l)->l_lock_num_prmode
-# define lock_num_exmode(_l) (_l)->l_lock_num_exmode
-# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
-# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
-# define lock_total_prmode(_l) (_l)->l_lock_total_prmode
-# define lock_total_exmode(_l) (_l)->l_lock_total_exmode
-# define lock_max_prmode(_l) (_l)->l_lock_max_prmode
-# define lock_max_exmode(_l) (_l)->l_lock_max_exmode
-# define lock_refresh(_l) (_l)->l_lock_refresh
+# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
+# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
+# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
+# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
+# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
+# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
+# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
+# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
+# define lock_refresh(_l) ((_l)->l_lock_refresh)
#else
-# define lock_num_prmode(_l) (0ULL)
-# define lock_num_exmode(_l) (0ULL)
+# define lock_num_prmode(_l) (0)
+# define lock_num_exmode(_l) (0)
# define lock_num_prmode_failed(_l) (0)
# define lock_num_exmode_failed(_l) (0)
# define lock_total_prmode(_l) (0ULL)
@@ -2933,8 +2841,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_refresh(_l) (0)
#endif
/* The following seq_print was added in version 2 of this output */
- seq_printf(m, "%llu\t"
- "%llu\t"
+ seq_printf(m, "%u\t"
+ "%u\t"
"%u\t"
"%u\t"
"%llu\t"
@@ -3054,8 +2962,6 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
int status = 0;
struct ocfs2_cluster_connection *conn = NULL;
- mlog_entry_void();
-
if (ocfs2_mount_local(osb)) {
osb->node_num = 0;
goto local;
@@ -3112,15 +3018,12 @@ bail:
kthread_stop(osb->dc_task);
}
- mlog_exit(status);
return status;
}
void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
int hangup_pending)
{
- mlog_entry_void();
-
ocfs2_drop_osb_locks(osb);
/*
@@ -3143,8 +3046,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
osb->cconn = NULL;
ocfs2_dlm_shutdown_debug(osb);
-
- mlog_exit_void();
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
@@ -3226,7 +3127,6 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
ocfs2_wait_on_busy_lock(lockres);
out:
- mlog_exit(0);
return 0;
}
@@ -3284,8 +3184,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
{
int status, err;
- mlog_entry_void();
-
/* No need to call ocfs2_mark_lockres_freeing here -
* ocfs2_clear_inode has done it for us. */
@@ -3310,7 +3208,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
if (err < 0 && !status)
status = err;
- mlog_exit(status);
return status;
}
@@ -3352,8 +3249,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
int ret;
u32 dlm_flags = DLM_LKF_CONVERT;
- mlog_entry_void();
-
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
@@ -3375,7 +3270,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
ret = 0;
bail:
- mlog_exit(ret);
return ret;
}
@@ -3385,8 +3279,6 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
{
assert_spin_locked(&lockres->l_lock);
- mlog_entry_void();
-
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
@@ -3416,8 +3308,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
{
int ret;
- mlog_entry_void();
-
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
DLM_LKF_CANCEL);
if (ret) {
@@ -3427,7 +3317,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
- mlog_exit(ret);
return ret;
}
@@ -3443,8 +3332,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
int set_lvb = 0;
unsigned int gen;
- mlog_entry_void();
-
spin_lock_irqsave(&lockres->l_lock, flags);
recheck:
@@ -3619,14 +3506,14 @@ downconvert:
gen);
leave:
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
leave_requeue:
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->requeue = 1;
- mlog_exit(0);
return 0;
}
@@ -3859,8 +3746,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
- mlog_entry_void();
-
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
@@ -3869,8 +3754,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
-
- mlog_exit_void();
}
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
@@ -3879,10 +3762,8 @@ void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
- mlog_entry_void();
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
- mlog_exit_void();
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
@@ -3937,8 +3818,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
int status = 0;
- mlog_entry_void();
-
/* On RO devices, locking really isn't needed... */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
@@ -3961,7 +3840,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
ocfs2_qinfo_unlock(oinfo, ex);
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
- mlog_exit(status);
return status;
}
@@ -4007,8 +3885,6 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
* considered valid until we remove the OCFS2_LOCK_QUEUED
* flag. */
- mlog_entry_void();
-
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
@@ -4042,15 +3918,11 @@ unqueue:
if (ctl.unblock_action != UNBLOCK_CONTINUE
&& lockres->l_ops->post_unlock)
lockres->l_ops->post_unlock(osb, lockres);
-
- mlog_exit_void();
}
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
assert_spin_locked(&lockres->l_lock);
if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -4071,8 +3943,6 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
osb->blocked_lock_count++;
}
spin_unlock(&osb->dc_task_lock);
-
- mlog_exit_void();
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
@@ -4080,8 +3950,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
unsigned long processed;
struct ocfs2_lock_res *lockres;
- mlog_entry_void();
-
spin_lock(&osb->dc_task_lock);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
@@ -4105,8 +3973,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
spin_lock(&osb->dc_task_lock);
}
spin_unlock(&osb->dc_task_lock);
-
- mlog_exit_void();
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
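For reference, the reworked accessors above imply that the old flat l_lock_* counters were folded into two per-mode stat structures (l_lock_prmode and l_lock_exmode). A minimal sketch of that structure, inferred purely from the macro names; the exact field types and units are assumptions, except that ls_max is in microseconds per the version 3 note:

struct ocfs2_lock_stats {
	u64 ls_total;	/* cumulative wait time (lock_total_*) */
	u32 ls_gets;	/* number of acquires (lock_num_*) */
	u32 ls_fail;	/* failed acquires (lock_num_*_failed) */
	u32 ls_max;	/* longest single wait, usecs (lock_max_*) */
};

The switch of the first two seq_printf() specifiers from %llu to %u above is consistent with ls_gets being a 32-bit counter in this layout.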
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 254652a..745db42 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/types.h>
-#define MLOG_MASK_PREFIX ML_EXPORT
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -40,6 +39,7 @@
#include "buffer_head_io.h"
#include "suballoc.h"
+#include "ocfs2_trace.h"
struct ocfs2_inode_handle
{
@@ -56,10 +56,9 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
int status, set;
struct dentry *result;
- mlog_entry("(0x%p, 0x%p)\n", sb, handle);
+ trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno);
if (blkno == 0) {
- mlog(0, "nfs wants inode with blkno: 0\n");
result = ERR_PTR(-ESTALE);
goto bail;
}
@@ -83,6 +82,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
}
status = ocfs2_test_inode_bit(osb, blkno, &set);
+ trace_ocfs2_get_dentry_test_bit(status, set);
if (status < 0) {
if (status == -EINVAL) {
/*
@@ -90,18 +90,14 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
* as an inode, we return -ESTALE to be
* nice
*/
- mlog(0, "test inode bit failed %d\n", status);
status = -ESTALE;
- } else {
+ } else
mlog(ML_ERROR, "test inode bit failed %d\n", status);
- }
goto unlock_nfs_sync;
}
/* If the inode allocator bit is clear, this inode must be stale */
if (!set) {
- mlog(0, "inode %llu suballoc bit is clear\n",
- (unsigned long long)blkno);
status = -ESTALE;
goto unlock_nfs_sync;
}
@@ -114,8 +110,8 @@ unlock_nfs_sync:
check_err:
if (status < 0) {
if (status == -ESTALE) {
- mlog(0, "stale inode ino: %llu generation: %u\n",
- (unsigned long long)blkno, handle->ih_generation);
+ trace_ocfs2_get_dentry_stale((unsigned long long)blkno,
+ handle->ih_generation);
}
result = ERR_PTR(status);
goto bail;
@@ -130,8 +126,9 @@ check_err:
check_gen:
if (handle->ih_generation != inode->i_generation) {
iput(inode);
- mlog(0, "stale inode ino: %llu generation: %u\n",
- (unsigned long long)blkno, handle->ih_generation);
+ trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
+ handle->ih_generation,
+ inode->i_generation);
result = ERR_PTR(-ESTALE);
goto bail;
}
@@ -141,7 +138,7 @@ check_gen:
mlog_errno(PTR_ERR(result));
bail:
- mlog_exit_ptr(result);
+ trace_ocfs2_get_dentry_end(result);
return result;
}
@@ -152,11 +149,8 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
struct dentry *parent;
struct inode *dir = child->d_inode;
- mlog_entry("(0x%p, '%.*s')\n", child,
- child->d_name.len, child->d_name.name);
-
- mlog(0, "find parent of directory %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno);
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
@@ -178,7 +172,7 @@ bail_unlock:
ocfs2_inode_unlock(dir, 0);
bail:
- mlog_exit_ptr(parent);
+ trace_ocfs2_get_parent_end(parent);
return parent;
}
@@ -193,9 +187,9 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
u32 generation;
__le32 *fh = (__force __le32 *) fh_in;
- mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry,
- dentry->d_name.len, dentry->d_name.name,
- fh, len, connectable);
+ trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ fh, len, connectable);
if (connectable && (len < 6)) {
*max_len = 6;
@@ -210,8 +204,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
blkno = OCFS2_I(inode)->ip_blkno;
generation = inode->i_generation;
- mlog(0, "Encoding fh: blkno: %llu, generation: %u\n",
- (unsigned long long)blkno, generation);
+ trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation);
len = 3;
fh[0] = cpu_to_le32((u32)(blkno >> 32));
@@ -236,14 +229,14 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
len = 6;
type = 2;
- mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
- (unsigned long long)blkno, generation);
+ trace_ocfs2_encode_fh_parent((unsigned long long)blkno,
+ generation);
}
*max_len = len;
bail:
- mlog_exit(type);
+ trace_ocfs2_encode_fh_type(type);
return type;
}
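The trace_ocfs2_* calls used throughout this file are declared in the new ocfs2_trace.h header via the kernel's standard TRACE_EVENT() machinery. As a hedged illustration only (the real header may share signatures through DECLARE_EVENT_CLASS() instead), a simple two-int event such as trace_ocfs2_get_dentry_test_bit() could be declared like so:

TRACE_EVENT(ocfs2_get_dentry_test_bit,
	TP_PROTO(int status, int set),
	TP_ARGS(status, set),
	TP_STRUCT__entry(
		__field(int, status)
		__field(int, set)
	),
	TP_fast_assign(
		__entry->status = status;
		__entry->set = set;
	),
	TP_printk("%d %d", __entry->status, __entry->set)
);

Once compiled in, the event shows up under the tracing debugfs tree and can be enabled per event or for the whole ocfs2 group, which is what makes dropping the always-compiled mlog(0, ...) calls an acceptable trade.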
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 09e3fdf..23457b4 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/fiemap.h>
-#define MLOG_MASK_PREFIX ML_EXTENT_MAP
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "inode.h"
#include "super.h"
#include "symlink.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -841,10 +841,9 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
u64 p_block, p_count;
int i, count, done = 0;
- mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, "
- "flags = %x, validate = %p)\n",
- inode, (unsigned long long)v_block, nr, bhs, flags,
- validate);
+ trace_ocfs2_read_virt_blocks(
+ inode, (unsigned long long)v_block, nr, bhs, flags,
+ validate);
if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
i_size_read(inode)) {
@@ -897,7 +896,6 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
}
out:
- mlog_exit(rc);
return rc;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a665195..41565ae 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -38,7 +38,6 @@
#include <linux/quotaops.h>
#include <linux/blkdev.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -61,6 +60,7 @@
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -99,8 +99,10 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
int mode = file->f_flags;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
- file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
+ trace_ocfs2_file_open(inode, file, file->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name, mode);
if (file->f_mode & FMODE_WRITE)
dquot_initialize(inode);
@@ -135,7 +137,6 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
}
leave:
- mlog_exit(status);
return status;
}
@@ -143,19 +144,19 @@ static int ocfs2_file_release(struct inode *inode, struct file *file)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
- file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
-
spin_lock(&oi->ip_lock);
if (!--oi->ip_open_count)
oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
+
+ trace_ocfs2_file_release(inode, file, file->f_path.dentry,
+ oi->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ oi->ip_open_count);
spin_unlock(&oi->ip_lock);
ocfs2_free_file_private(inode, file);
- mlog_exit(0);
-
return 0;
}
@@ -177,9 +178,11 @@ static int ocfs2_sync_file(struct file *file, int datasync)
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync,
- file->f_path.dentry, file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
+ trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
+ OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ (unsigned long long)datasync);
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
/*
@@ -195,7 +198,8 @@ static int ocfs2_sync_file(struct file *file, int datasync)
err = jbd2_journal_force_commit(journal);
bail:
- mlog_exit(err);
+ if (err)
+ mlog_errno(err);
return (err < 0) ? -EIO : 0;
}
@@ -251,8 +255,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
- mlog_entry_void();
-
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -280,7 +282,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
- mlog_exit(ret);
return ret;
}
@@ -291,7 +292,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
{
int status;
- mlog_entry_void();
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -303,7 +303,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
}
bail:
- mlog_exit(status);
return status;
}
@@ -375,8 +374,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
struct ocfs2_dinode *di;
u64 cluster_bytes;
- mlog_entry_void();
-
/*
* We need to CoW the cluster that contains the offset if it is reflinked
* since we will call ocfs2_zero_range_for_truncate later which will
@@ -429,8 +426,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
out_commit:
ocfs2_commit_trans(osb, handle);
out:
-
- mlog_exit(status);
return status;
}
@@ -442,14 +437,14 @@ static int ocfs2_truncate_file(struct inode *inode,
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(inode = %llu, new_i_size = %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)new_i_size);
-
/* We trust di_bh because it comes from ocfs2_inode_lock(), which
* already validated it */
fe = (struct ocfs2_dinode *) di_bh->b_data;
+ trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ (unsigned long long)new_i_size);
+
mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
"Inode %llu, inode i_size = %lld != di "
"i_size = %llu, i_flags = 0x%x\n",
@@ -459,19 +454,14 @@ static int ocfs2_truncate_file(struct inode *inode,
le32_to_cpu(fe->i_flags));
if (new_i_size > le64_to_cpu(fe->i_size)) {
- mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
- (unsigned long long)le64_to_cpu(fe->i_size),
- (unsigned long long)new_i_size);
+ trace_ocfs2_truncate_file_error(
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ (unsigned long long)new_i_size);
status = -EINVAL;
mlog_errno(status);
goto bail;
}
- mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
- (unsigned long long)le64_to_cpu(fe->i_blkno),
- (unsigned long long)le64_to_cpu(fe->i_size),
- (unsigned long long)new_i_size);
-
/* let's handle the simple truncate cases before doing any more
* cluster locking. */
if (new_i_size == le64_to_cpu(fe->i_size))
@@ -525,7 +515,6 @@ bail:
if (!status && OCFS2_I(inode)->ip_clusters == 0)
status = ocfs2_try_remove_refcount_tree(inode, di_bh);
- mlog_exit(status);
return status;
}
@@ -578,8 +567,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
struct ocfs2_extent_tree et;
int did_quota = 0;
- mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
-
/*
* This function only exists for file systems which don't
* support holes.
@@ -596,11 +583,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
restart_all:
BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
- mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
- "clusters_to_add = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
- clusters_to_add);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
&data_ac, &meta_ac);
@@ -620,6 +602,12 @@ restart_all:
}
restarted_transaction:
+ trace_ocfs2_extend_allocation(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)i_size_read(inode),
+ le32_to_cpu(fe->i_clusters), clusters_to_add,
+ why, restart_func);
+
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (status)
@@ -666,13 +654,11 @@ restarted_transaction:
if (why != RESTART_NONE && clusters_to_add) {
if (why == RESTART_META) {
- mlog(0, "restarting function.\n");
restart_func = 1;
status = 0;
} else {
BUG_ON(why != RESTART_TRANS);
- mlog(0, "restarting transaction.\n");
/* TODO: This can be more intelligent. */
credits = ocfs2_calc_extend_credits(osb->sb,
&fe->id2.i_list,
@@ -689,11 +675,11 @@ restarted_transaction:
}
}
- mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
+ trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
le32_to_cpu(fe->i_clusters),
- (unsigned long long)le64_to_cpu(fe->i_size));
- mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
- OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ OCFS2_I(inode)->ip_clusters,
+ (unsigned long long)i_size_read(inode));
leave:
if (status < 0 && did_quota)
@@ -718,7 +704,6 @@ leave:
brelse(bh);
bh = NULL;
- mlog_exit(status);
return status;
}
@@ -785,10 +770,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
if (!zero_to)
zero_to = PAGE_CACHE_SIZE;
- mlog(0,
- "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
- (unsigned long long)abs_from, (unsigned long long)abs_to,
- index, zero_from, zero_to);
+ trace_ocfs2_write_zero_page(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)abs_from,
+ (unsigned long long)abs_to,
+ index, zero_from, zero_to);
/* We know that zero_from is block aligned */
for (block_start = zero_from; block_start < zero_to;
@@ -928,9 +914,10 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
u64 next_pos;
u64 zero_pos = range_start;
- mlog(0, "range_start = %llu, range_end = %llu\n",
- (unsigned long long)range_start,
- (unsigned long long)range_end);
+ trace_ocfs2_zero_extend_range(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)range_start,
+ (unsigned long long)range_end);
BUG_ON(range_start >= range_end);
while (zero_pos < range_end) {
@@ -962,9 +949,9 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
struct super_block *sb = inode->i_sb;
zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
- mlog(0, "zero_start %llu for i_size %llu\n",
- (unsigned long long)zero_start,
- (unsigned long long)i_size_read(inode));
+ trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)zero_start,
+ (unsigned long long)i_size_read(inode));
while (zero_start < zero_to_size) {
ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
zero_to_size,
@@ -1113,30 +1100,20 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
struct dquot *transfer_to[MAXQUOTAS] = { };
int qtype;
- mlog_entry("(0x%p, '%.*s')\n", dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_setattr(inode, dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ dentry->d_name.len, dentry->d_name.name,
+ attr->ia_valid, attr->ia_mode,
+ attr->ia_uid, attr->ia_gid);
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE;
- if (attr->ia_valid & ATTR_MODE)
- mlog(0, "mode change: %d\n", attr->ia_mode);
- if (attr->ia_valid & ATTR_UID)
- mlog(0, "uid change: %d\n", attr->ia_uid);
- if (attr->ia_valid & ATTR_GID)
- mlog(0, "gid change: %d\n", attr->ia_gid);
- if (attr->ia_valid & ATTR_SIZE)
- mlog(0, "size change...\n");
- if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
- mlog(0, "time change...\n");
-
#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
| ATTR_GID | ATTR_UID | ATTR_MODE)
- if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
- mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
+ if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
return 0;
- }
status = inode_change_ok(inode, attr);
if (status)
@@ -1274,7 +1251,6 @@ bail:
mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -1287,8 +1263,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
struct ocfs2_super *osb = sb->s_fs_info;
int err;
- mlog_entry_void();
-
err = ocfs2_inode_revalidate(dentry);
if (err) {
if (err != -ENOENT)
@@ -1302,8 +1276,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
stat->blksize = osb->s_clustersize;
bail:
- mlog_exit(err);
-
return err;
}
@@ -1314,8 +1286,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
if (flags & IPERM_FLAG_RCU)
return -ECHILD;
- mlog_entry_void();
-
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret) {
if (ret != -ENOENT)
@@ -1327,7 +1297,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
ocfs2_inode_unlock(inode, 0);
out:
- mlog_exit(ret);
return ret;
}
@@ -1339,8 +1308,9 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di;
- mlog_entry("(Inode %llu, mode 0%o)\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
+ trace_ocfs2_write_remove_suid(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ inode->i_mode);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
@@ -1368,7 +1338,6 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
out_trans:
ocfs2_commit_trans(osb, handle);
out:
- mlog_exit(ret);
return ret;
}
@@ -1547,8 +1516,9 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
* partial clusters here. There's no need to worry about
* physical allocation - the zeroing code knows to skip holes.
*/
- mlog(0, "byte start: %llu, end: %llu\n",
- (unsigned long long)start, (unsigned long long)end);
+ trace_ocfs2_zero_partial_clusters(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)start, (unsigned long long)end);
/*
* If both edges are on a cluster boundary then there's no
@@ -1572,8 +1542,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
if (tmpend > end)
tmpend = end;
- mlog(0, "1st range: start: %llu, tmpend: %llu\n",
- (unsigned long long)start, (unsigned long long)tmpend);
+ trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+ (unsigned long long)tmpend);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
if (ret)
@@ -1587,8 +1557,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
*/
start = end & ~(osb->s_clustersize - 1);
- mlog(0, "2nd range: start: %llu, end: %llu\n",
- (unsigned long long)start, (unsigned long long)end);
+ trace_ocfs2_zero_partial_clusters_range2(
+ (unsigned long long)start, (unsigned long long)end);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
if (ret)
@@ -1688,6 +1658,11 @@ static int ocfs2_remove_inode_range(struct inode *inode,
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
+ trace_ocfs2_remove_inode_range(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)byte_start,
+ (unsigned long long)byte_len);
+
if (byte_len == 0)
return 0;
@@ -1734,11 +1709,6 @@ static int ocfs2_remove_inode_range(struct inode *inode,
trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
cluster_in_el = trunc_end;
- mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)byte_start,
- (unsigned long long)byte_len, trunc_start, trunc_end);
-
ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
if (ret) {
mlog_errno(ret);
@@ -2093,7 +2063,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
int ret = 0, meta_level = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
- loff_t saved_pos, end;
+ loff_t saved_pos = 0, end;
/*
* We start with a read level meta lock and only jump to an ex
@@ -2132,12 +2102,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
/* work on a copy of ppos until we're sure that we won't have
* to recalculate it due to relocking. */
- if (appending) {
+ if (appending)
saved_pos = i_size_read(inode);
- mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
- } else {
+ else
saved_pos = *ppos;
- }
end = saved_pos + count;
@@ -2208,6 +2176,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
*ppos = saved_pos;
out_unlock:
+ trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
+ saved_pos, appending, count,
+ direct_io, has_refcount);
+
if (meta_level >= 0)
ocfs2_inode_unlock(inode, meta_level);
@@ -2233,10 +2205,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
- mlog_entry("(0x%p, %u, '%.*s')\n", file,
- (unsigned int)nr_segs,
- file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
+ trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ (unsigned int)nr_segs);
if (iocb->ki_left == 0)
return 0;
@@ -2402,7 +2375,6 @@ out_sems:
if (written)
ret = written;
- mlog_exit(ret);
return ret;
}
@@ -2438,10 +2410,11 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
.u.file = out,
};
- mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
- (unsigned int)len,
- out->f_path.dentry->d_name.len,
- out->f_path.dentry->d_name.name);
+
+ trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ out->f_path.dentry->d_name.len,
+ out->f_path.dentry->d_name.name, len);
if (pipe->inode)
mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
@@ -2485,7 +2458,6 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
}
- mlog_exit(ret);
return ret;
}
@@ -2498,10 +2470,10 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
int ret = 0, lock_level = 0;
struct inode *inode = in->f_path.dentry->d_inode;
- mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
- (unsigned int)len,
- in->f_path.dentry->d_name.len,
- in->f_path.dentry->d_name.name);
+ trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ in->f_path.dentry->d_name.len,
+ in->f_path.dentry->d_name.name, len);
/*
* See the comment in ocfs2_file_aio_read()
@@ -2516,7 +2488,6 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
ret = generic_file_splice_read(in, ppos, pipe, len, flags);
bail:
- mlog_exit(ret);
return ret;
}
@@ -2529,10 +2500,11 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
struct file *filp = iocb->ki_filp;
struct inode *inode = filp->f_path.dentry->d_inode;
- mlog_entry("(0x%p, %u, '%.*s')\n", filp,
- (unsigned int)nr_segs,
- filp->f_path.dentry->d_name.len,
- filp->f_path.dentry->d_name.name);
+ trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ filp->f_path.dentry->d_name.len,
+ filp->f_path.dentry->d_name.name, nr_segs);
+
if (!inode) {
ret = -EINVAL;
@@ -2578,8 +2550,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
ocfs2_inode_unlock(inode, lock_level);
ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
- if (ret == -EINVAL)
- mlog(0, "generic_file_aio_read returned -EINVAL\n");
+ trace_generic_file_aio_read_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
@@ -2597,7 +2568,6 @@ bail:
}
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
- mlog_exit(ret);
return ret;
}
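Several of the events above also record the file's dentry name next to the pointers and block number. The tracepoint string helpers (__string/__assign_str/__get_str) handle the variable-length name; here is a sketch matching the trace_ocfs2_file_open() call site, with the caveat that the actual ocfs2_trace.h layout is an assumption:

TRACE_EVENT(ocfs2_file_open,
	TP_PROTO(void *inode, void *file, void *dentry,
		 unsigned long long blkno, unsigned int len,
		 const unsigned char *name, int mode),
	TP_ARGS(inode, file, dentry, blkno, len, name, mode),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(void *, file)
		__field(void *, dentry)
		__field(unsigned long long, blkno)
		__field(unsigned int, len)
		__string(name, name)
		__field(int, mode)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->file = file;
		__entry->dentry = dentry;
		__entry->blkno = blkno;
		__entry->len = len;
		__assign_str(name, name);
		__entry->mode = mode;
	),
	TP_printk("%p %p %p %llu %u %s %d",
		  __entry->inode, __entry->file, __entry->dentry,
		  __entry->blkno, __entry->len, __get_str(name),
		  __entry->mode)
);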
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 1aa863d..d8208b2 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_SUPER
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -37,6 +36,7 @@
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -66,7 +66,7 @@ void ocfs2_do_node_down(int node_num, void *data)
BUG_ON(osb->node_num == node_num);
- mlog(0, "ocfs2: node down event for %d\n", node_num);
+ trace_ocfs2_do_node_down(node_num);
if (!osb->cconn) {
/*
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 4068c6c..b4c8bb6 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -31,7 +31,6 @@
#include <asm/byteorder.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -53,6 +52,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -131,7 +131,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
struct super_block *sb = osb->sb;
struct ocfs2_find_inode_args args;
- mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno);
+ trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
+ sysfile_type);
/* Ok. By now we've either got the offsets passed to us by the
* caller, or we just pulled them off the bh. Let's do some
@@ -152,16 +153,16 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
/* inode was *not* in the inode cache. 2.6.x requires
* us to do our own read_inode call and unlock it
* afterwards. */
- if (inode && inode->i_state & I_NEW) {
- mlog(0, "Inode was not in inode cache, reading it.\n");
- ocfs2_read_locked_inode(inode, &args);
- unlock_new_inode(inode);
- }
if (inode == NULL) {
inode = ERR_PTR(-ENOMEM);
mlog_errno(PTR_ERR(inode));
goto bail;
}
+ trace_ocfs2_iget5_locked(inode->i_state);
+ if (inode->i_state & I_NEW) {
+ ocfs2_read_locked_inode(inode, &args);
+ unlock_new_inode(inode);
+ }
if (is_bad_inode(inode)) {
iput(inode);
inode = ERR_PTR(-ESTALE);
@@ -170,9 +171,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
bail:
if (!IS_ERR(inode)) {
- mlog(0, "returning inode with number %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
- mlog_exit_ptr(inode);
+ trace_ocfs2_iget_end(inode,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
}
return inode;
@@ -192,18 +192,17 @@ static int ocfs2_find_actor(struct inode *inode, void *opaque)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
- mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque);
-
args = opaque;
mlog_bug_on_msg(!inode, "No inode in find actor!\n");
+ trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno);
+
if (oi->ip_blkno != args->fi_blkno)
goto bail;
ret = 1;
bail:
- mlog_exit(ret);
return ret;
}
@@ -218,8 +217,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
ocfs2_file_ip_alloc_sem_key;
- mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
-
inode->i_ino = args->fi_ino;
OCFS2_I(inode)->ip_blkno = args->fi_blkno;
if (args->fi_sysfile_type != 0)
@@ -235,7 +232,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
&ocfs2_file_ip_alloc_sem_key);
- mlog_exit(0);
return 0;
}
@@ -246,9 +242,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
struct ocfs2_super *osb;
int use_plocks = 1;
- mlog_entry("(0x%p, size:%llu)\n", inode,
- (unsigned long long)le64_to_cpu(fe->i_size));
-
sb = inode->i_sb;
osb = OCFS2_SB(sb);
@@ -300,20 +293,20 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_nlink = ocfs2_read_links_count(fe);
+ trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
+ le32_to_cpu(fe->i_flags));
if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
inode->i_flags |= S_NOQUOTA;
}
-
+
if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
- mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino);
} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
inode->i_flags |= S_NOQUOTA;
} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
- mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino);
/* we can't actually hit this as read_inode can't
* handle superblocks today ;-) */
BUG();
@@ -381,7 +374,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
if (S_ISDIR(inode->i_mode))
ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
OCFS2_RESV_FLAG_DIR);
- mlog_exit_void();
}
static int ocfs2_read_locked_inode(struct inode *inode,
@@ -394,8 +386,6 @@ static int ocfs2_read_locked_inode(struct inode *inode,
int status, can_lock;
u32 generation = 0;
- mlog_entry("(0x%p, 0x%p)\n", inode, args);
-
status = -EINVAL;
if (inode == NULL || inode->i_sb == NULL) {
mlog(ML_ERROR, "bad inode\n");
@@ -443,6 +433,9 @@ static int ocfs2_read_locked_inode(struct inode *inode,
&& !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
&& !ocfs2_mount_local(osb);
+ trace_ocfs2_read_locked_inode(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock);
+
/*
* To maintain backwards compatibility with older versions of
* ocfs2-tools, we still store the generation value for system
@@ -534,7 +527,6 @@ bail:
if (args && bh)
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -551,8 +543,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct ocfs2_dinode *fe;
handle_t *handle = NULL;
- mlog_entry_void();
-
fe = (struct ocfs2_dinode *) fe_bh->b_data;
/*
@@ -600,7 +590,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
out:
if (handle)
ocfs2_commit_trans(osb, handle);
- mlog_exit(status);
return status;
}
@@ -696,8 +685,6 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
spin_lock(&osb->osb_lock);
if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) {
- mlog(0, "Recovery is happening on orphan dir %d, will skip "
- "this inode\n", slot);
ret = -EDEADLK;
goto out;
}
@@ -706,6 +693,7 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
osb->osb_orphan_wipes[slot]++;
out:
spin_unlock(&osb->osb_lock);
+ trace_ocfs2_check_orphan_recovery_state(slot, ret);
return ret;
}
@@ -816,6 +804,10 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task,
+ (unsigned long long)oi->ip_blkno,
+ oi->ip_flags);
+
/* We shouldn't be getting here for the root directory
* inode.. */
if (inode == osb->root_inode) {
@@ -828,11 +820,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
* have to skip deleting this guy. That's OK though because
* the node who's doing the actual deleting should handle it
* anyway. */
- if (current == osb->dc_task) {
- mlog(0, "Skipping delete of %lu because we're currently "
- "in downconvert\n", inode->i_ino);
+ if (current == osb->dc_task)
goto bail;
- }
spin_lock(&oi->ip_lock);
/* OCFS2 *never* deletes system files. This should technically
@@ -846,12 +835,9 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
/* If we have allowed wipe of this inode for another node, it
* will be marked here so we can safely skip it. Recovery will
- * cleanup any inodes we might inadvertantly skip here. */
- if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) {
- mlog(0, "Skipping delete of %lu because another node "
- "has done this for us.\n", inode->i_ino);
+ * cleanup any inodes we might inadvertently skip here. */
+ if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE)
goto bail_unlock;
- }
ret = 1;
bail_unlock:
@@ -868,28 +854,27 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
struct buffer_head *di_bh,
int *wipe)
{
- int status = 0;
+ int status = 0, reason = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di;
*wipe = 0;
+ trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
+ inode->i_nlink);
+
/* While we were waiting for the cluster lock in
* ocfs2_delete_inode, another node might have asked to delete
* the inode. Recheck our flags to catch this. */
if (!ocfs2_inode_is_valid_to_delete(inode)) {
- mlog(0, "Skipping delete of %llu because flags changed\n",
- (unsigned long long)oi->ip_blkno);
+ reason = 1;
goto bail;
}
/* Now that we have an up to date inode, we can double check
* the link count. */
- if (inode->i_nlink) {
- mlog(0, "Skipping delete of %llu because nlink = %u\n",
- (unsigned long long)oi->ip_blkno, inode->i_nlink);
+ if (inode->i_nlink)
goto bail;
- }
/* Do some basic inode verification... */
di = (struct ocfs2_dinode *) di_bh->b_data;
@@ -904,9 +889,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
* ORPHANED_FL not.
*/
if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
- mlog(0, "Reflinked inode %llu is no longer orphaned. "
- "it shouldn't be deleted\n",
- (unsigned long long)oi->ip_blkno);
+ reason = 2;
goto bail;
}
@@ -934,7 +917,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
* the inode open lock in ocfs2_read_locked_inode(). When we
* get to ->delete_inode(), each node tries to convert its
* lock to an exclusive. Trylocks are serialized by the inode
- * meta data lock. If the upconvert suceeds, we know the inode
+ * meta data lock. If the upconvert succeeds, we know the inode
* is no longer live and can be deleted.
*
* Though we call this with the meta data lock held, the
@@ -943,8 +926,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
status = ocfs2_try_open_lock(inode, 1);
if (status == -EAGAIN) {
status = 0;
- mlog(0, "Skipping delete of %llu because it is in use on "
- "other nodes\n", (unsigned long long)oi->ip_blkno);
+ reason = 3;
goto bail;
}
if (status < 0) {
@@ -953,11 +935,10 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
}
*wipe = 1;
- mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n",
- (unsigned long long)oi->ip_blkno,
- le16_to_cpu(di->i_orphaned_slot));
+ trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));
bail:
+ trace_ocfs2_query_inode_wipe_end(status, reason);
return status;
}
@@ -967,8 +948,8 @@ bail:
static void ocfs2_cleanup_delete_inode(struct inode *inode,
int sync_data)
{
- mlog(0, "Cleanup inode %llu, sync = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
+ trace_ocfs2_cleanup_delete_inode(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
write_inode_now(inode, 1);
truncate_inode_pages(&inode->i_data, 0);
@@ -980,15 +961,15 @@ static void ocfs2_delete_inode(struct inode *inode)
sigset_t oldset;
struct buffer_head *di_bh = NULL;
- mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+ trace_ocfs2_delete_inode(inode->i_ino,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ is_bad_inode(inode));
/* When we fail in read_inode() we mark inode as bad. The second test
* catches the case when inode allocation fails before allocating
* a block for inode. */
- if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) {
- mlog(0, "Skipping delete of bad inode\n");
+ if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
goto bail;
- }
dquot_initialize(inode);
@@ -1080,7 +1061,7 @@ bail_unlock_nfs_sync:
bail_unblock:
ocfs2_unblock_signals(&oldset);
bail:
- mlog_exit_void();
+ return;
}
static void ocfs2_clear_inode(struct inode *inode)
@@ -1088,11 +1069,9 @@ static void ocfs2_clear_inode(struct inode *inode)
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry_void();
-
end_writeback(inode);
- mlog(0, "Clearing inode: %llu, nlink = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink);
+ trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
+ inode->i_nlink);
mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
"Inode=%lu\n", inode->i_ino);
@@ -1181,8 +1160,6 @@ static void ocfs2_clear_inode(struct inode *inode)
*/
jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
&oi->ip_jinode);
-
- mlog_exit_void();
}
void ocfs2_evict_inode(struct inode *inode)
@@ -1204,17 +1181,14 @@ int ocfs2_drop_inode(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int res;
- mlog_entry_void();
-
- mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n",
- (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags);
+ trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
+ inode->i_nlink, oi->ip_flags);
if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
res = 1;
else
res = generic_drop_inode(inode);
- mlog_exit_void();
return res;
}
@@ -1226,11 +1200,11 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int status = 0;
- mlog_entry("(inode = 0x%p, ino = %llu)\n", inode,
- inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL);
+ trace_ocfs2_inode_revalidate(inode,
+ inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
+ inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);
if (!inode) {
- mlog(0, "eep, no inode!\n");
status = -ENOENT;
goto bail;
}
@@ -1238,7 +1212,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
spin_lock(&OCFS2_I(inode)->ip_lock);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
- mlog(0, "inode deleted!\n");
status = -ENOENT;
goto bail;
}
@@ -1254,8 +1227,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
}
ocfs2_inode_unlock(inode, 0);
bail:
- mlog_exit(status);
-
return status;
}
@@ -1271,8 +1242,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
int status;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
- mlog_entry("(inode %llu)\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1302,7 +1272,6 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
ocfs2_journal_dirty(handle, bh);
leave:
- mlog_exit(status);
return status;
}
@@ -1345,8 +1314,7 @@ int ocfs2_validate_inode_block(struct super_block *sb,
int rc;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
- mlog(0, "Validating dinode %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
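Events like trace_ocfs2_mark_inode_dirty() and trace_ocfs2_validate_inode_block() above take a single unsigned long long block number, which is exactly the duplication DECLARE_EVENT_CLASS()/DEFINE_EVENT() is meant to remove. A sketch, assuming the header factors them this way:

DECLARE_EVENT_CLASS(ocfs2__ull,
	TP_PROTO(unsigned long long blkno),
	TP_ARGS(blkno),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
	),
	TP_printk("%llu", __entry->blkno)
);

DEFINE_EVENT(ocfs2__ull, ocfs2_mark_inode_dirty,
	TP_PROTO(unsigned long long blkno),
	TP_ARGS(blkno));

DEFINE_EVENT(ocfs2__ull, ocfs2_validate_inode_block,
	TP_PROTO(unsigned long long blkno),
	TP_ARGS(blkno));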
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 7a48681..8f13c59 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -9,7 +9,6 @@
#include <linux/mount.h>
#include <linux/compat.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -46,6 +45,22 @@ static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq,
#define o2info_set_request_error(a, b) \
__o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
+static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags |= OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_set_request_filled(a) \
+ __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
+
+static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_clear_request_filled(a) \
+ __o2info_clear_request_filled((struct ocfs2_info_request *)&(a))
+
static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
int status;
@@ -59,7 +74,6 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
*flags = OCFS2_I(inode)->ip_attr;
ocfs2_inode_unlock(inode, 0);
- mlog_exit(status);
return status;
}
@@ -82,7 +96,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
}
status = -EACCES;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
goto bail_unlock;
if (!S_ISDIR(inode->i_mode))
@@ -125,7 +139,6 @@ bail:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -139,7 +152,8 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
goto bail;
oib.ib_blocksize = inode->i_sb->s_blocksize;
- oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oib);
if (o2info_to_user(oib, req))
goto bail;
@@ -163,7 +177,8 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
goto bail;
oic.ic_clustersize = osb->s_clustersize;
- oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oic);
if (o2info_to_user(oic, req))
goto bail;
@@ -187,7 +202,8 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
goto bail;
oim.im_max_slots = osb->max_slots;
- oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oim);
if (o2info_to_user(oim, req))
goto bail;
@@ -211,7 +227,8 @@ int ocfs2_info_handle_label(struct inode *inode,
goto bail;
memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
- oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oil);
if (o2info_to_user(oil, req))
goto bail;
@@ -235,7 +252,8 @@ int ocfs2_info_handle_uuid(struct inode *inode,
goto bail;
memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
- oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oiu);
if (o2info_to_user(oiu, req))
goto bail;
@@ -261,7 +279,8 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
oif.if_compat_features = osb->s_feature_compat;
oif.if_incompat_features = osb->s_feature_incompat;
oif.if_ro_compat_features = osb->s_feature_ro_compat;
- oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oif);
if (o2info_to_user(oif, req))
goto bail;
@@ -286,7 +305,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
oij.ij_journal_size = osb->journal->j_inode->i_size;
- oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+ o2info_set_request_filled(oij);
if (o2info_to_user(oij, req))
goto bail;
@@ -308,7 +327,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
if (o2info_from_user(oir, req))
goto bail;
- oir.ir_flags &= ~OCFS2_INFO_FL_FILLED;
+ o2info_clear_request_filled(oir);
if (o2info_to_user(oir, req))
goto bail;
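The o2info_set_request_filled()/o2info_clear_request_filled() helpers introduced above rely on every ocfs2_info_* reply type embedding a struct ocfs2_info_request as its first member, which is what makes the pointer cast in the macros well-defined. A sketch of the convention; the ocfs2_info_blocksize layout shown is an assumption keyed off the ib_req/ib_blocksize names used at the call sites:

struct ocfs2_info_blocksize {
	struct ocfs2_info_request ib_req;	/* must stay the first member */
	__u32 ib_blocksize;
};

static void fill_blocksize_reply(struct ocfs2_info_blocksize *oib,
				 struct super_block *sb)
{
	oib->ib_blocksize = sb->s_blocksize;
	/* same store that o2info_set_request_filled(*oib) expands to */
	((struct ocfs2_info_request *)oib)->ir_flags |= OCFS2_INFO_FL_FILLED;
}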
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index faa2303..b141a44 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -31,7 +31,6 @@
#include <linux/time.h>
#include <linux/random.h>
-#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -52,6 +51,7 @@
#include "quota.h"
#include "buffer_head_io.h"
+#include "ocfs2_trace.h"
DEFINE_SPINLOCK(trans_inc_lock);
@@ -303,16 +303,15 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
unsigned int flushed;
struct ocfs2_journal *journal = NULL;
- mlog_entry_void();
-
journal = osb->journal;
/* Flush all pending commits and checkpoint the journal. */
down_write(&journal->j_trans_barrier);
- if (atomic_read(&journal->j_num_trans) == 0) {
+ flushed = atomic_read(&journal->j_num_trans);
+ trace_ocfs2_commit_cache_begin(flushed);
+ if (flushed == 0) {
up_write(&journal->j_trans_barrier);
- mlog(0, "No transactions for me to flush!\n");
goto finally;
}
@@ -331,13 +330,11 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
atomic_set(&journal->j_num_trans, 0);
up_write(&journal->j_trans_barrier);
- mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
- journal->j_trans_id, flushed);
+ trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
ocfs2_wake_downconvert_thread(osb);
wake_up(&journal->j_checkpointed);
finally:
- mlog_exit(status);
return status;
}
@@ -425,9 +422,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
return 0;
old_nblocks = handle->h_buffer_credits;
- mlog_entry_void();
- mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
+ trace_ocfs2_extend_trans(old_nblocks, nblocks);
#ifdef CONFIG_OCFS2_DEBUG_FS
status = 1;
@@ -440,9 +436,7 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
#endif
if (status > 0) {
- mlog(0,
- "jbd2_journal_extend failed, trying "
- "jbd2_journal_restart\n");
+ trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
status = jbd2_journal_restart(handle,
old_nblocks + nblocks);
if (status < 0) {
@@ -453,8 +447,6 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
status = 0;
bail:
-
- mlog_exit(status);
return status;
}
@@ -622,12 +614,9 @@ static int __ocfs2_journal_access(handle_t *handle,
BUG_ON(!handle);
BUG_ON(!bh);
- mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
- (unsigned long long)bh->b_blocknr, type,
- (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
- "OCFS2_JOURNAL_ACCESS_CREATE" :
- "OCFS2_JOURNAL_ACCESS_WRITE",
- bh->b_size);
+ trace_ocfs2_journal_access(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)bh->b_blocknr, type, bh->b_size);
/* we can safely remove this assertion after testing. */
if (!buffer_uptodate(bh)) {
@@ -668,7 +657,6 @@ static int __ocfs2_journal_access(handle_t *handle,
mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
status, type);
- mlog_exit(status);
return status;
}
@@ -737,13 +725,10 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
int status;
- mlog_entry("(bh->b_blocknr=%llu)\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
status = jbd2_journal_dirty_metadata(handle, bh);
BUG_ON(status);
-
- mlog_exit_void();
}
#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
@@ -775,8 +760,6 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
struct ocfs2_super *osb;
int inode_lock = 0;
- mlog_entry_void();
-
BUG_ON(!journal);
osb = journal->j_osb;
@@ -820,10 +803,9 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
goto done;
}
- mlog(0, "inode->i_size = %lld\n", inode->i_size);
- mlog(0, "inode->i_blocks = %llu\n",
- (unsigned long long)inode->i_blocks);
- mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
+ trace_ocfs2_journal_init(inode->i_size,
+ (unsigned long long)inode->i_blocks,
+ OCFS2_I(inode)->ip_clusters);
/* call the kernel's journal init function now */
j_journal = jbd2_journal_init_inode(inode);
@@ -833,8 +815,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
goto done;
}
- mlog(0, "Returned from jbd2_journal_init_inode\n");
- mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
+ trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
OCFS2_JOURNAL_DIRTY_FL);
@@ -859,7 +840,6 @@ done:
}
}
- mlog_exit(status);
return status;
}
@@ -882,8 +862,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
struct buffer_head *bh = journal->j_bh;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
fe = (struct ocfs2_dinode *)bh->b_data;
/* The journal bh on the osb always comes from ocfs2_journal_init()
@@ -906,7 +884,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
if (status < 0)
mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -921,8 +898,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
struct inode *inode = NULL;
int num_running_trans = 0;
- mlog_entry_void();
-
BUG_ON(!osb);
journal = osb->journal;
@@ -939,10 +914,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
BUG();
num_running_trans = atomic_read(&(osb->journal->j_num_trans));
- if (num_running_trans > 0)
- mlog(0, "Shutting down journal: must wait on %d "
- "running transactions!\n",
- num_running_trans);
+ trace_ocfs2_journal_shutdown(num_running_trans);
/* Do a commit_cache here. It will flush our journal, *and*
* release any locks that are still held.
@@ -955,7 +927,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
* completely destroy the journal. */
if (osb->commit_task) {
/* Wait for the commit thread */
- mlog(0, "Waiting for ocfs2commit to exit....\n");
+ trace_ocfs2_journal_shutdown_wait(osb->commit_task);
kthread_stop(osb->commit_task);
osb->commit_task = NULL;
}
@@ -998,7 +970,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
done:
if (inode)
iput(inode);
- mlog_exit_void();
}
static void ocfs2_clear_journal_error(struct super_block *sb,
@@ -1024,8 +995,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
int status = 0;
struct ocfs2_super *osb;
- mlog_entry_void();
-
BUG_ON(!journal);
osb = journal->j_osb;
@@ -1059,7 +1028,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
osb->commit_task = NULL;
done:
- mlog_exit(status);
return status;
}
@@ -1070,8 +1038,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
int status;
- mlog_entry_void();
-
BUG_ON(!journal);
status = jbd2_journal_wipe(journal->j_journal, full);
@@ -1085,7 +1051,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
mlog_errno(status);
bail:
- mlog_exit(status);
return status;
}
@@ -1124,8 +1089,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
#define CONCURRENT_JOURNAL_FILL 32ULL
struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
- mlog_entry_void();
-
memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
@@ -1161,7 +1124,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
bail:
for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
brelse(bhs[i]);
- mlog_exit(status);
return status;
}
@@ -1185,7 +1147,7 @@ struct ocfs2_la_recovery_item {
*/
void ocfs2_complete_recovery(struct work_struct *work)
{
- int ret;
+ int ret = 0;
struct ocfs2_journal *journal =
container_of(work, struct ocfs2_journal, j_recovery_work);
struct ocfs2_super *osb = journal->j_osb;
@@ -1194,9 +1156,8 @@ void ocfs2_complete_recovery(struct work_struct *work)
struct ocfs2_quota_recovery *qrec;
LIST_HEAD(tmp_la_list);
- mlog_entry_void();
-
- mlog(0, "completing recovery from keventd\n");
+ trace_ocfs2_complete_recovery(
+ (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
spin_lock(&journal->j_lock);
list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
@@ -1205,15 +1166,18 @@ void ocfs2_complete_recovery(struct work_struct *work)
list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
list_del_init(&item->lri_list);
- mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
-
ocfs2_wait_on_quotas(osb);
la_dinode = item->lri_la_dinode;
- if (la_dinode) {
- mlog(0, "Clean up local alloc %llu\n",
- (unsigned long long)le64_to_cpu(la_dinode->i_blkno));
+ tl_dinode = item->lri_tl_dinode;
+ qrec = item->lri_qrec;
+
+ trace_ocfs2_complete_recovery_slot(item->lri_slot,
+ la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
+ tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
+ qrec);
+ if (la_dinode) {
ret = ocfs2_complete_local_alloc_recovery(osb,
la_dinode);
if (ret < 0)
@@ -1222,11 +1186,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
kfree(la_dinode);
}
- tl_dinode = item->lri_tl_dinode;
if (tl_dinode) {
- mlog(0, "Clean up truncate log %llu\n",
- (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
-
ret = ocfs2_complete_truncate_log_recovery(osb,
tl_dinode);
if (ret < 0)
@@ -1239,9 +1199,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
if (ret < 0)
mlog_errno(ret);
- qrec = item->lri_qrec;
if (qrec) {
- mlog(0, "Recovering quota files");
ret = ocfs2_finish_quota_recovery(osb, qrec,
item->lri_slot);
if (ret < 0)
@@ -1252,8 +1210,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
kfree(item);
}
- mlog(0, "Recovery completion\n");
- mlog_exit_void();
+ trace_ocfs2_complete_recovery_end(ret);
}
/* NOTE: This function always eats your references to la_dinode and
@@ -1339,8 +1296,6 @@ static int __ocfs2_recovery_thread(void *arg)
int rm_quota_used = 0, i;
struct ocfs2_quota_recovery *qrec;
- mlog_entry_void();
-
status = ocfs2_wait_on_mount(osb);
if (status < 0) {
goto bail;
@@ -1372,15 +1327,12 @@ restart:
* clear it until ocfs2_recover_node() has succeeded. */
node_num = rm->rm_entries[0];
spin_unlock(&osb->osb_lock);
- mlog(0, "checking node %d\n", node_num);
slot_num = ocfs2_node_num_to_slot(osb, node_num);
+ trace_ocfs2_recovery_thread_node(node_num, slot_num);
if (slot_num == -ENOENT) {
status = 0;
- mlog(0, "no slot for this node, so no recovery"
- "required.\n");
goto skip_recovery;
}
- mlog(0, "node %d was using slot %d\n", node_num, slot_num);
/* It is a bit subtle with quota recovery. We cannot do it
* immediately because we have to obtain cluster locks from
@@ -1407,7 +1359,7 @@ skip_recovery:
spin_lock(&osb->osb_lock);
}
spin_unlock(&osb->osb_lock);
- mlog(0, "All nodes recovered\n");
+ trace_ocfs2_recovery_thread_end(status);
/* Refresh all journal recovery generations from disk */
status = ocfs2_check_journals_nolocks(osb);
@@ -1416,7 +1368,7 @@ skip_recovery:
mlog_errno(status);
/* Now is the right time to recover quotas... We have to do this under
- * superblock lock so that noone can start using the slot (and crash)
+ * superblock lock so that no one can start using the slot (and crash)
* before we recover it */
for (i = 0; i < rm_quota_used; i++) {
qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
@@ -1451,7 +1403,6 @@ bail:
if (rm_quota)
kfree(rm_quota);
- mlog_exit(status);
/* no one is calling kthread_stop() for us so the kthread() api
* requires that we call do_exit(). And it isn't exported, but
* complete_and_exit() seems to be a minimal wrapper around it. */
@@ -1461,19 +1412,15 @@ bail:
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
- mlog_entry("(node_num=%d, osb->node_num = %d)\n",
- node_num, osb->node_num);
-
mutex_lock(&osb->recovery_lock);
- if (osb->disable_recovery)
- goto out;
- /* People waiting on recovery will wait on
- * the recovery map to empty. */
- if (ocfs2_recovery_map_set(osb, node_num))
- mlog(0, "node %d already in recovery map.\n", node_num);
+ trace_ocfs2_recovery_thread(node_num, osb->node_num,
+ osb->disable_recovery, osb->recovery_thread_task,
+ osb->disable_recovery ?
+ -1 : ocfs2_recovery_map_set(osb, node_num));
- mlog(0, "starting recovery thread...\n");
+ if (osb->disable_recovery)
+ goto out;
if (osb->recovery_thread_task)
goto out;
@@ -1488,8 +1435,6 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
out:
mutex_unlock(&osb->recovery_lock);
wake_up(&osb->recovery_event);
-
- mlog_exit_void();
}
static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
@@ -1563,7 +1508,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
* If not, it needs recovery.
*/
if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
- mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
+ trace_ocfs2_replay_journal_recovered(slot_num,
osb->slot_recovery_generations[slot_num], slot_reco_gen);
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
status = -EBUSY;
@@ -1574,7 +1519,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
if (status < 0) {
- mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
+ trace_ocfs2_replay_journal_lock_err(status);
if (status != -ERESTARTSYS)
mlog(ML_ERROR, "Could not lock journal!\n");
goto done;
@@ -1587,7 +1532,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
slot_reco_gen = ocfs2_get_recovery_generation(fe);
if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
- mlog(0, "No recovery required for node %d\n", node_num);
+ trace_ocfs2_replay_journal_skip(node_num);
/* Refresh recovery generation for the slot */
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
goto done;
@@ -1608,7 +1553,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
goto done;
}
- mlog(0, "calling journal_init_inode\n");
journal = jbd2_journal_init_inode(inode);
if (journal == NULL) {
mlog(ML_ERROR, "Linux journal layer error\n");
@@ -1628,7 +1572,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
ocfs2_clear_journal_error(osb->sb, journal, slot_num);
/* wipe the journal */
- mlog(0, "flushing the journal.\n");
jbd2_journal_lock_updates(journal);
status = jbd2_journal_flush(journal);
jbd2_journal_unlock_updates(journal);
@@ -1665,7 +1608,6 @@ done:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -1688,8 +1630,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
struct ocfs2_dinode *la_copy = NULL;
struct ocfs2_dinode *tl_copy = NULL;
- mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
- node_num, slot_num, osb->node_num);
+ trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
/* Should not ever be called to recover ourselves -- in that
* case we should've called ocfs2_journal_load instead. */
@@ -1698,9 +1639,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
status = ocfs2_replay_journal(osb, node_num, slot_num);
if (status < 0) {
if (status == -EBUSY) {
- mlog(0, "Skipping recovery for slot %u (node %u) "
- "as another node has recovered it\n", slot_num,
- node_num);
+ trace_ocfs2_recover_node_skip(slot_num, node_num);
status = 0;
goto done;
}
@@ -1735,7 +1674,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
status = 0;
done:
- mlog_exit(status);
return status;
}
@@ -1808,8 +1746,8 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
spin_lock(&osb->osb_lock);
osb->slot_recovery_generations[i] = gen;
- mlog(0, "Slot %u recovery generation is %u\n", i,
- osb->slot_recovery_generations[i]);
+ trace_ocfs2_mark_dead_nodes(i,
+ osb->slot_recovery_generations[i]);
if (i == osb->slot_num) {
spin_unlock(&osb->osb_lock);
@@ -1845,7 +1783,6 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
status = 0;
bail:
- mlog_exit(status);
return status;
}
@@ -1884,11 +1821,12 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
os = &osb->osb_orphan_scan;
- mlog(0, "Begin orphan scan\n");
-
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
goto out;
+ trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
+ atomic_read(&os->os_state));
+
status = ocfs2_orphan_scan_lock(osb, &seqno);
if (status < 0) {
if (status != -EAGAIN)
@@ -1918,7 +1856,8 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
unlock:
ocfs2_orphan_scan_unlock(osb, seqno);
out:
- mlog(0, "Orphan scan completed\n");
+ trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
+ atomic_read(&os->os_state));
return;
}
@@ -2002,8 +1941,7 @@ static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
if (IS_ERR(iter))
return 0;
- mlog(0, "queue orphan %llu\n",
- (unsigned long long)OCFS2_I(iter)->ip_blkno);
+ trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
/* No locking is required for the next_orphan queue as there
* is only ever a single process doing orphan recovery. */
OCFS2_I(iter)->ip_next_orphan = p->head;
@@ -2119,7 +2057,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
struct inode *iter;
struct ocfs2_inode_info *oi;
- mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);
+ trace_ocfs2_recover_orphans(slot);
ocfs2_mark_recovering_orphan_dir(osb, slot);
ret = ocfs2_queue_orphans(osb, slot, &inode);
@@ -2132,7 +2070,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
while (inode) {
oi = OCFS2_I(inode);
- mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);
+ trace_ocfs2_recover_orphans_iput(
+ (unsigned long long)oi->ip_blkno);
iter = oi->ip_next_orphan;
@@ -2170,6 +2109,7 @@ static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
* MOUNTED flag, but this is set right before
* dismount_volume() so we can trust it. */
if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
+ trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
mlog(0, "mount error, exiting!\n");
return -EBUSY;
}
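
The journal.c hunks above all apply one conversion pattern: the unconditional mlog_entry()/mlog_exit() pair becomes a tracepoint at the interesting spot plus error-only reporting on the way out. A minimal userspace sketch of the new exit convention, assuming a hypothetical mlog_errno_sketch() stand-in for the kernel's mlog_errno() macro:

#include <stdio.h>

#define mlog_errno_sketch(st) \
	fprintf(stderr, "%s:%d: status = %d\n", __func__, __LINE__, (st))

static int do_work(int fail)
{
	int status = 0;

	if (fail)
		status = -5;	/* say, -EIO */

	/* old style: mlog_exit(status) fired on every return */
	/* new style: report only failures, stay silent on success */
	if (status)
		mlog_errno_sketch(status);
	return status;
}

int main(void)
{
	do_work(0);	/* silent */
	do_work(1);	/* logs once to stderr */
	return 0;
}

The net effect in the patch is that success paths cost nothing unless the tracepoint is enabled, while failures still reach the log through mlog_errno().
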
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 6180da1..68cf2f6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -215,7 +215,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
/* WARNING: This only kicks off a single
* checkpoint. If someone races you and adds more
* metadata to the journal, you won't know, and will
- * wind up waiting *alot* longer than necessary. Right
+ * wind up waiting *a lot* longer than necessary. Right
* now we only use this in clear_inode so that's
* OK. */
ocfs2_start_checkpoint(osb);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ec6adbf..210c352 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -29,7 +29,6 @@
#include <linux/highmem.h>
#include <linux/bitops.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -43,6 +42,7 @@
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -201,8 +201,7 @@ void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
la_max_mb = ocfs2_clusters_to_megabytes(sb,
ocfs2_local_alloc_size(sb) * 8);
- mlog(0, "requested: %dM, max: %uM, default: %uM\n",
- requested_mb, la_max_mb, la_default_mb);
+ trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
if (requested_mb == -1) {
/* No user request - use defaults */
@@ -276,8 +275,8 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
ret = 1;
bail:
- mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n",
- osb->local_alloc_state, (unsigned long long)bits, la_bits, ret);
+ trace_ocfs2_alloc_should_use_local(
+ (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
spin_unlock(&osb->osb_lock);
return ret;
}
@@ -291,8 +290,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
struct inode *inode = NULL;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
-
if (osb->local_alloc_bits == 0)
goto bail;
@@ -364,9 +361,10 @@ bail:
if (inode)
iput(inode);
- mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits);
+ trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -388,8 +386,6 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_dinode *alloc = NULL;
- mlog_entry_void();
-
cancel_delayed_work(&osb->la_enable_wq);
flush_workqueue(ocfs2_wq);
@@ -482,8 +478,6 @@ out:
if (alloc_copy)
kfree(alloc_copy);
-
- mlog_exit_void();
}
/*
@@ -502,7 +496,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
struct inode *inode = NULL;
struct ocfs2_dinode *alloc;
- mlog_entry("(slot_num = %d)\n", slot_num);
+ trace_ocfs2_begin_local_alloc_recovery(slot_num);
*alloc_copy = NULL;
@@ -552,7 +546,8 @@ bail:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -570,8 +565,6 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode;
- mlog_entry_void();
-
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -620,7 +613,8 @@ out_mutex:
out:
if (!status)
ocfs2_init_steal_slots(osb);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -640,8 +634,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
struct inode *local_alloc_inode;
unsigned int free_bits;
- mlog_entry_void();
-
BUG_ON(!ac);
local_alloc_inode =
@@ -712,10 +704,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
goto bail;
}
- if (ac->ac_max_block)
- mlog(0, "Calling in_range for max block %llu\n",
- (unsigned long long)ac->ac_max_block);
-
ac->ac_inode = local_alloc_inode;
/* We should never use localalloc from another slot */
ac->ac_alloc_slot = osb->slot_num;
@@ -729,10 +717,12 @@ bail:
iput(local_alloc_inode);
}
- mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num,
- status);
+ trace_ocfs2_reserve_local_alloc_bits(
+ (unsigned long long)ac->ac_max_block,
+ bits_wanted, osb->slot_num, status);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -749,7 +739,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
local_alloc_inode = ac->ac_inode;
@@ -788,7 +777,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, osb->local_alloc_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -799,13 +789,11 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
u32 count = 0;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
- mlog_entry_void();
-
buffer = la->la_bitmap;
for (i = 0; i < le16_to_cpu(la->la_size); i++)
count += hweight8(buffer[i]);
- mlog_exit(count);
+ trace_ocfs2_local_alloc_count_bits(count);
return count;
}
@@ -820,10 +808,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
void *bitmap = NULL;
struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
- mlog_entry("(numbits wanted = %u)\n", *numbits);
-
if (!alloc->id1.bitmap1.i_total) {
- mlog(0, "No bits in my window!\n");
bitoff = -1;
goto bail;
}
@@ -883,8 +868,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
}
}
- mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
- numfound);
+ trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
if (numfound == *numbits)
bitoff = startoff - numfound;
@@ -895,7 +879,10 @@ bail:
if (local_resv)
ocfs2_resv_discard(resmap, resv);
- mlog_exit(bitoff);
+ trace_ocfs2_local_alloc_find_clear_bits(*numbits,
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ bitoff, numfound);
+
return bitoff;
}
@@ -903,15 +890,12 @@ static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
int i;
- mlog_entry_void();
alloc->id1.bitmap1.i_total = 0;
alloc->id1.bitmap1.i_used = 0;
la->la_bm_off = 0;
for(i = 0; i < le16_to_cpu(la->la_size); i++)
la->la_bitmap[i] = 0;
-
- mlog_exit_void();
}
#if 0
@@ -952,18 +936,16 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
void *bitmap;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
- mlog_entry("total = %u, used = %u\n",
- le32_to_cpu(alloc->id1.bitmap1.i_total),
- le32_to_cpu(alloc->id1.bitmap1.i_used));
+ trace_ocfs2_sync_local_to_main(
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ le32_to_cpu(alloc->id1.bitmap1.i_used));
if (!alloc->id1.bitmap1.i_total) {
- mlog(0, "nothing to sync!\n");
goto bail;
}
if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
le32_to_cpu(alloc->id1.bitmap1.i_total)) {
- mlog(0, "all bits were taken!\n");
goto bail;
}
@@ -985,8 +967,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
ocfs2_clusters_to_blocks(osb->sb,
start - count);
- mlog(0, "freeing %u bits starting at local alloc bit "
- "%u (la_start_blk = %llu, blkno = %llu)\n",
+ trace_ocfs2_sync_local_to_main_free(
count, start - count,
(unsigned long long)la_start_blk,
(unsigned long long)blkno);
@@ -1007,7 +988,8 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1132,7 +1114,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1148,17 +1131,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc = NULL;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
-
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
- if (alloc->id1.bitmap1.i_total)
- mlog(0, "asking me to alloc a new window over a non-empty "
- "one\n");
-
- mlog(0, "Allocating %u clusters for a new window.\n",
- osb->local_alloc_bits);
+ trace_ocfs2_local_alloc_new_window(
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ osb->local_alloc_bits);
/* Instruct the allocation code to try the most recently used
* cluster group. We'll re-record the group used this pass
@@ -1220,13 +1198,13 @@ retry_enospc:
ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
- mlog(0, "New window allocated:\n");
- mlog(0, "window la_bm_off = %u\n",
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
- mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total));
+ trace_ocfs2_local_alloc_new_window_result(
+ OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
+ le32_to_cpu(alloc->id1.bitmap1.i_total));
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1243,8 +1221,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_alloc_context *ac = NULL;
- mlog_entry_void();
-
ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
/* This will lock the main bitmap for us. */
@@ -1324,7 +1300,8 @@ bail:
if (ac)
ocfs2_free_alloc_context(ac);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
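
One detail worth calling out in the localalloc.c hunks: ocfs2_local_alloc_count_bits() keeps its byte-wise population count over the window bitmap, and only the exit logging becomes trace_ocfs2_local_alloc_count_bits(count). A rough userspace analogue of that loop, where count_bits() is an illustrative name and __builtin_popcount() (GCC/Clang) stands in for the kernel's hweight8():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t count_bits(const uint8_t *bitmap, size_t size)
{
	uint32_t count = 0;
	size_t i;

	/* sum the set bits of each byte, as the la_bitmap walk does */
	for (i = 0; i < size; i++)
		count += (uint32_t)__builtin_popcount(bitmap[i]);
	return count;
}

int main(void)
{
	uint8_t la_bitmap[4] = { 0xff, 0x0f, 0x01, 0x00 };

	printf("%u bits set\n", count_bits(la_bitmap, sizeof(la_bitmap)));
	return 0;	/* prints "13 bits set" */
}
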
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index b5cb3ed..e57c804 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/fcntl.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 7e32db9..3e9393c 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -31,7 +31,6 @@
#include <linux/signal.h>
#include <linux/rbtree.h>
-#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -42,6 +41,7 @@
#include "inode.h"
#include "mmap.h"
#include "super.h"
+#include "ocfs2_trace.h"
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
@@ -49,13 +49,12 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
sigset_t oldset;
int ret;
- mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
-
ocfs2_block_signals(&oldset);
ret = filemap_fault(area, vmf);
ocfs2_unblock_signals(&oldset);
- mlog_exit_ptr(vmf->page);
+ trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
+ area, vmf->page, vmf->pgoff);
return ret;
}
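
The mmap.c change leaves the signal-blocking bracket around filemap_fault() intact and only swaps the entry/exit logging for one trace_ocfs2_fault() call once the fault has completed. The bracket itself is the ordinary save-mask/restore-mask pattern; sketched here in userspace terms with sigprocmask(), where do_guarded_work() is an invented example, not an ocfs2 function:

#include <signal.h>
#include <stdio.h>

static void do_guarded_work(void)
{
	sigset_t all, oldset;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &oldset);	/* save the caller's mask */

	puts("working with asynchronous signals blocked");

	sigprocmask(SIG_SETMASK, &oldset, NULL);	/* restore it exactly */
}

int main(void)
{
	do_guarded_work();
	return 0;
}
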
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d6c25d7..e5d738c 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -42,7 +42,6 @@
#include <linux/highmem.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -63,6 +62,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "acl.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -106,17 +106,15 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *ret;
struct ocfs2_inode_info *oi;
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_lookup(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, 0);
if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) {
ret = ERR_PTR(-ENAMETOOLONG);
goto bail;
}
- mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
- dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
-
status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
@@ -182,7 +180,7 @@ bail_unlock:
bail:
- mlog_exit_ptr(ret);
+ trace_ocfs2_lookup_ret(ret);
return ret;
}
@@ -235,9 +233,9 @@ static int ocfs2_mknod(struct inode *dir,
sigset_t oldset;
int did_block_signals = 0;
- mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
- (unsigned long)dev, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long)dev, mode);
dquot_initialize(dir);
@@ -354,10 +352,6 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
did_quota_inode = 1;
- mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
- inode->i_mode, (unsigned long)dev, dentry->d_name.len,
- dentry->d_name.name);
-
/* do the real work now. */
status = ocfs2_mknod_locked(osb, dir, inode, dev,
&new_fe_bh, parent_fe_bh, handle,
@@ -436,9 +430,6 @@ leave:
if (did_block_signals)
ocfs2_unblock_signals(&oldset);
- if (status == -ENOSPC)
- mlog(0, "Disk is full\n");
-
brelse(new_fe_bh);
brelse(parent_fe_bh);
kfree(si.name);
@@ -466,7 +457,8 @@ leave:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -577,7 +569,8 @@ leave:
}
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -615,10 +608,11 @@ static int ocfs2_mkdir(struct inode *dir,
{
int ret;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -630,10 +624,11 @@ static int ocfs2_create(struct inode *dir,
{
int ret;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -652,9 +647,9 @@ static int ocfs2_link(struct dentry *old_dentry,
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
- mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino,
- old_dentry->d_name.len, old_dentry->d_name.name,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ dentry->d_name.len, dentry->d_name.name);
if (S_ISDIR(inode->i_mode))
return -EPERM;
@@ -757,7 +752,8 @@ out:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(err);
+ if (err)
+ mlog_errno(err);
return err;
}
@@ -809,19 +805,17 @@ static int ocfs2_unlink(struct inode *dir,
struct ocfs2_dir_lookup_result lookup = { NULL, };
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_unlink(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
dquot_initialize(dir);
BUG_ON(dentry->d_parent->d_inode != dir);
- mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
-
- if (inode == osb->root_inode) {
- mlog(0, "Cannot delete the root directory\n");
+ if (inode == osb->root_inode)
return -EPERM;
- }
status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
OI_LS_PARENT);
@@ -843,9 +837,10 @@ static int ocfs2_unlink(struct inode *dir,
if (OCFS2_I(inode)->ip_blkno != blkno) {
status = -ENOENT;
- mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)blkno, OCFS2_I(inode)->ip_flags);
+ trace_ocfs2_unlink_noent(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno,
+ OCFS2_I(inode)->ip_flags);
goto leave;
}
@@ -954,7 +949,8 @@ leave:
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -975,9 +971,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **tmpbh;
struct inode *tmpinode;
- mlog_entry("(inode1 = %llu, inode2 = %llu)\n",
- (unsigned long long)oi1->ip_blkno,
- (unsigned long long)oi2->ip_blkno);
+ trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
+ (unsigned long long)oi2->ip_blkno);
if (*bh1)
*bh1 = NULL;
@@ -988,7 +983,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
if (oi1->ip_blkno != oi2->ip_blkno) {
if (oi1->ip_blkno < oi2->ip_blkno) {
/* switch id1 and id2 around */
- mlog(0, "switching them around...\n");
tmpbh = bh2;
bh2 = bh1;
bh1 = tmpbh;
@@ -1024,8 +1018,13 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
mlog_errno(status);
}
+ trace_ocfs2_double_lock_end(
+ (unsigned long long)OCFS2_I(inode1)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode2)->ip_blkno);
+
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1067,10 +1066,9 @@ static int ocfs2_rename(struct inode *old_dir,
/* At some point it might be nice to break this function up a
* bit. */
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n",
- old_dir, old_dentry, new_dir, new_dentry,
- old_dentry->d_name.len, old_dentry->d_name.name,
- new_dentry->d_name.len, new_dentry->d_name.name);
+ trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ new_dentry->d_name.len, new_dentry->d_name.name);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
@@ -1227,16 +1225,15 @@ static int ocfs2_rename(struct inode *old_dir,
if (!new_inode) {
status = -EACCES;
- mlog(0, "We found an inode for name %.*s but VFS "
- "didn't give us one.\n", new_dentry->d_name.len,
- new_dentry->d_name.name);
+ trace_ocfs2_rename_target_exists(new_dentry->d_name.len,
+ new_dentry->d_name.name);
goto bail;
}
if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) {
status = -EACCES;
- mlog(0, "Inode %llu and dir %llu disagree. flags = %x\n",
+ trace_ocfs2_rename_disagree(
(unsigned long long)OCFS2_I(new_inode)->ip_blkno,
(unsigned long long)newfe_blkno,
OCFS2_I(new_inode)->ip_flags);
@@ -1259,8 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir,
newfe = (struct ocfs2_dinode *) newfe_bh->b_data;
- mlog(0, "aha rename over existing... new_blkno=%llu "
- "newfebh=%p bhblocknr=%llu\n",
+ trace_ocfs2_rename_over_existing(
(unsigned long long)newfe_blkno, newfe_bh, newfe_bh ?
(unsigned long long)newfe_bh->b_blocknr : 0ULL);
@@ -1476,7 +1472,8 @@ bail:
brelse(old_dir_bh);
brelse(new_dir_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1501,9 +1498,8 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
* write i_size + 1 bytes. */
blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
- (unsigned long long)inode->i_blocks,
- i_size_read(inode), blocks);
+ trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks,
+ i_size_read(inode), blocks);
/* Sanity check -- make sure we're going to fit. */
if (bytes_left >
@@ -1579,7 +1575,8 @@ bail:
kfree(bhs);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1610,8 +1607,8 @@ static int ocfs2_symlink(struct inode *dir,
sigset_t oldset;
int did_block_signals = 0;
- mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir,
- dentry, symname, dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_symlink_begin(dir, dentry, symname,
+ dentry->d_name.len, dentry->d_name.name);
dquot_initialize(dir);
@@ -1713,9 +1710,10 @@ static int ocfs2_symlink(struct inode *dir,
goto bail;
did_quota_inode = 1;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry,
- inode->i_mode, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ inode->i_mode);
status = ocfs2_mknod_locked(osb, dir, inode,
0, &new_fe_bh, parent_fe_bh, handle,
@@ -1835,7 +1833,8 @@ bail:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1844,8 +1843,6 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
{
int status, namelen;
- mlog_entry_void();
-
namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx",
(long long)blkno);
if (namelen <= 0) {
@@ -1862,12 +1859,12 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
goto bail;
}
- mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name,
- namelen);
+ trace_ocfs2_blkno_stringify(blkno, name, namelen);
status = 0;
bail:
- mlog_exit(status);
+ if (status < 0)
+ mlog_errno(status);
return status;
}
@@ -1980,7 +1977,8 @@ out:
iput(orphan_dir_inode);
}
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -1997,7 +1995,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
struct ocfs2_dinode *orphan_fe;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
- mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+ trace_ocfs2_orphan_add_begin(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh);
if (status < 0) {
@@ -2056,13 +2055,14 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, fe_bh);
- mlog(0, "Inode %llu orphaned in slot %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
+ trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ osb->slot_num);
leave:
brelse(orphan_dir_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2078,17 +2078,15 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
int status = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry_void();
-
status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
if (status < 0) {
mlog_errno(status);
goto leave;
}
- mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n",
- name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
- OCFS2_ORPHAN_NAMELEN);
+ trace_ocfs2_orphan_del(
+ (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
+ name, OCFS2_ORPHAN_NAMELEN);
/* find its spot in the orphan directory */
status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode,
@@ -2124,12 +2122,13 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
leave:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
/**
- * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to recieve a newly
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
* allocated file. This is different from the typical 'add to orphan dir'
* operation in that the inode does not yet exist. This is a problem because
* the orphan dir stringifies the inode block number to come up with its
@@ -2321,9 +2320,6 @@ leave:
iput(orphan_dir);
}
- if (status == -ENOSPC)
- mlog(0, "Disk is full\n");
-
if ((status < 0) && inode) {
clear_nlink(inode);
iput(inode);
@@ -2358,8 +2354,10 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
struct buffer_head *di_bh = NULL;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry,
+ dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
@@ -2476,7 +2474,8 @@ leave:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
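
ocfs2_double_lock(), touched above only for its logging, encodes a technique worth spelling out: it avoids AB-BA deadlock between two inode cluster locks by always taking the lock of the inode with the smaller ip_blkno first. A self-contained pthread sketch of that ordering discipline (struct node, double_lock() and double_unlock() are illustrative names, not ocfs2 API; build with -pthread):

#include <pthread.h>
#include <stdint.h>

struct node {
	uint64_t blkno;			/* stable ordering key, like ip_blkno */
	pthread_mutex_t lock;
};

static void double_lock(struct node *a, struct node *b)
{
	if (a->blkno > b->blkno) {	/* swap so 'a' holds the smaller key */
		struct node *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)			/* same node passed twice: lock once */
		pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct node *a, struct node *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct node n1 = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct node n2 = { 2, PTHREAD_MUTEX_INITIALIZER };

	double_lock(&n2, &n1);		/* safe in either argument order */
	double_unlock(&n1, &n2);
	return 0;
}

Because both callers agree on the key order, two threads locking the same pair with opposite argument orders still acquire the mutexes in the same global order.
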
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 51cd689..4092858 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -147,6 +147,17 @@ struct ocfs2_lock_res_ops;
typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
+#ifdef CONFIG_OCFS2_FS_STATS
+struct ocfs2_lock_stats {
+ u64 ls_total; /* Total wait in NSEC */
+ u32 ls_gets; /* Num acquires */
+ u32 ls_fail; /* Num failed acquires */
+
+ /* Storing max wait in usecs saves 24 bytes per inode */
+ u32 ls_max; /* Max wait in USEC */
+};
+#endif
+
struct ocfs2_lock_res {
void *l_priv;
struct ocfs2_lock_res_ops *l_ops;
@@ -182,15 +193,9 @@ struct ocfs2_lock_res {
struct list_head l_debug_list;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long l_lock_num_prmode; /* PR acquires */
- unsigned long long l_lock_num_exmode; /* EX acquires */
- unsigned int l_lock_num_prmode_failed; /* Failed PR gets */
- unsigned int l_lock_num_exmode_failed; /* Failed EX gets */
- unsigned long long l_lock_total_prmode; /* Tot wait for PR */
- unsigned long long l_lock_total_exmode; /* Tot wait for EX */
- unsigned int l_lock_max_prmode; /* Max wait for PR */
- unsigned int l_lock_max_exmode; /* Max wait for EX */
- unsigned int l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */
+ u32 l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map l_lockdep_map;
@@ -831,18 +836,18 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
- ext2_set_bit(bit, bitmap);
+ __test_and_set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
- ext2_clear_bit(bit, bitmap);
+ __test_and_clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
-#define ocfs2_test_bit ext2_test_bit
-#define ocfs2_find_next_zero_bit ext2_find_next_zero_bit
-#define ocfs2_find_next_bit ext2_find_next_bit
+#define ocfs2_test_bit test_bit_le
+#define ocfs2_find_next_zero_bit find_next_zero_bit_le
+#define ocfs2_find_next_bit find_next_bit_le
#endif /* OCFS2_H */
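
The final ocfs2.h hunk swaps the removed ext2_* bitmap helpers for the generic little-endian bit operations. The point of the _le variants is that bit N always lands in byte N/8, bit N%8, regardless of host byte order, which is what keeps the on-disk bitmaps portable across architectures. One plausible byte-addressed rendering of the test-and-set semantics (the kernel's __test_and_set_bit_le() really operates on longs with LE conversion, so this is an analogue, not the implementation):

#include <stdint.h>
#include <stdio.h>

static int test_and_set_bit_le(unsigned int bit, void *bitmap)
{
	uint8_t *bytes = bitmap;
	uint8_t mask = (uint8_t)(1u << (bit & 7));
	int old = (bytes[bit >> 3] & mask) != 0;

	bytes[bit >> 3] |= mask;	/* set it */
	return old;			/* report the previous value */
}

int main(void)
{
	uint8_t map[2] = { 0, 0 };

	printf("first set:  %d\n", test_and_set_bit_le(9, map));	/* 0 */
	printf("second set: %d\n", test_and_set_bit_le(9, map));	/* 1 */
	printf("byte 1:     0x%02x\n", map[1]);				/* 0x02 */
	return 0;
}
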
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index bf2e776..b68f87a 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -441,7 +441,7 @@ static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
struct ocfs2_block_check {
/*00*/ __le32 bc_crc32e; /* 802.3 Ethernet II CRC32 */
__le16 bc_ecc; /* Single-error-correction parity vector.
- This is a simple Hamming code dependant
+ This is a simple Hamming code dependent
on the blocksize. OCFS2's maximum
blocksize, 4K, requires 16 parity bits,
so we fit in __le16. */
@@ -750,7 +750,7 @@ struct ocfs2_dinode {
after an unclean
shutdown */
} journal1;
- } id1; /* Inode type dependant 1 */
+ } id1; /* Inode type dependent 1 */
/*C0*/ union {
struct ocfs2_super_block i_super;
struct ocfs2_local_alloc i_lab;
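
The new fs/ocfs2/ocfs2_trace.h that follows is built almost entirely from one idiom: DECLARE_EVENT_CLASS() fixes an argument layout, record layout and TP_printk() format once, and each DEFINE_OCFS2_*_EVENT() wrapper stamps out a named event sharing that class. A deliberately simplified userspace caricature of the split (the real macros also generate binary ring-buffer records and static tracepoint call sites; only the shape is kept here):

#include <stdio.h>

/* the "class": argument layout and output format, defined once */
#define DECLARE_CLASS_INT(class)				\
static void class##_emit(const char *event, int num)		\
{								\
	printf("%s: %d\n", event, num);				\
}

/* each "event": a thin named wrapper over its class */
#define DEFINE_INT_EVENT(class, name)				\
static void trace_##name(int num)				\
{								\
	class##_emit(#name, num);				\
}

DECLARE_CLASS_INT(ocfs2__int)
DEFINE_INT_EVENT(ocfs2__int, ocfs2_num_free_extents)
DEFINE_INT_EVENT(ocfs2__int, ocfs2_complete_edge_insert)

int main(void)
{
	trace_ocfs2_num_free_extents(4);
	trace_ocfs2_complete_edge_insert(-1);
	return 0;
}

Sharing the class keeps the header compact: the many one-line DEFINE_OCFS2_*_EVENT() entries below reuse a handful of classes instead of repeating the TP_STRUCT__entry/TP_fast_assign boilerplate per event.
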
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
new file mode 100644
index 0000000..a1dae5b
--- /dev/null
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -0,0 +1,2739 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ocfs2
+
+#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OCFS2_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ocfs2__int,
+ TP_PROTO(int num),
+ TP_ARGS(num),
+ TP_STRUCT__entry(
+ __field(int, num)
+ ),
+ TP_fast_assign(
+ __entry->num = num;
+ ),
+ TP_printk("%d", __entry->num)
+);
+
+#define DEFINE_OCFS2_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__int, name, \
+ TP_PROTO(int num), \
+ TP_ARGS(num))
+
+DECLARE_EVENT_CLASS(ocfs2__uint,
+ TP_PROTO(unsigned int num),
+ TP_ARGS(num),
+ TP_STRUCT__entry(
+ __field( unsigned int, num )
+ ),
+ TP_fast_assign(
+ __entry->num = num;
+ ),
+ TP_printk("%u", __entry->num)
+);
+
+#define DEFINE_OCFS2_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint, name, \
+ TP_PROTO(unsigned int num), \
+ TP_ARGS(num))
+
+DECLARE_EVENT_CLASS(ocfs2__ull,
+ TP_PROTO(unsigned long long blkno),
+ TP_ARGS(blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%llu", __entry->blkno)
+);
+
+#define DEFINE_OCFS2_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull, name, \
+ TP_PROTO(unsigned long long num), \
+ TP_ARGS(num))
+
+DECLARE_EVENT_CLASS(ocfs2__pointer,
+ TP_PROTO(void *pointer),
+ TP_ARGS(pointer),
+ TP_STRUCT__entry(
+ __field(void *, pointer)
+ ),
+ TP_fast_assign(
+ __entry->pointer = pointer;
+ ),
+ TP_printk("%p", __entry->pointer)
+);
+
+#define DEFINE_OCFS2_POINTER_EVENT(name) \
+DEFINE_EVENT(ocfs2__pointer, name, \
+ TP_PROTO(void *pointer), \
+ TP_ARGS(pointer))
+
+DECLARE_EVENT_CLASS(ocfs2__string,
+ TP_PROTO(const char *name),
+ TP_ARGS(name),
+ TP_STRUCT__entry(
+	__string(name, name)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+ TP_printk("%s", __get_str(name))
+);
+
+#define DEFINE_OCFS2_STRING_EVENT(name) \
+DEFINE_EVENT(ocfs2__string, name, \
+ TP_PROTO(const char *name), \
+ TP_ARGS(name))
+
+DECLARE_EVENT_CLASS(ocfs2__int_int,
+ TP_PROTO(int value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(int, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%d %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_INT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__int_int, name, \
+ TP_PROTO(int val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_int,
+ TP_PROTO(unsigned int value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned int, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%u %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_UINT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_int, name, \
+ TP_PROTO(unsigned int val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_uint,
+ TP_PROTO(unsigned int value1, unsigned int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%u %u", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_uint, name, \
+ TP_PROTO(unsigned int val1, unsigned int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint,
+ TP_PROTO(unsigned long long value1, unsigned int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %u", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint, name, \
+ TP_PROTO(unsigned long long val1, unsigned int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_int,
+ TP_PROTO(unsigned long long value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_int, name, \
+ TP_PROTO(unsigned long long val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull,
+ TP_PROTO(unsigned long long value1, unsigned long long value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %llu", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull, name, \
+ TP_PROTO(unsigned long long val1, unsigned long long val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint,
+ TP_PROTO(unsigned long long value1,
+ unsigned long long value2, unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %llu %u",
+ __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_uint, name, \
+ TP_PROTO(unsigned long long val1, \
+ unsigned long long val2, unsigned int val3), \
+ TP_ARGS(val1, val2, val3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint,
+ TP_PROTO(unsigned long long value1,
+ unsigned int value2, unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned int, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %u %u", __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint_uint, name, \
+ TP_PROTO(unsigned long long val1, \
+ unsigned int val2, unsigned int val3), \
+ TP_ARGS(val1, val2, val3))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_uint_uint,
+ TP_PROTO(unsigned int value1, unsigned int value2,
+ unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field( unsigned int, value1 )
+ __field( unsigned int, value2 )
+ __field( unsigned int, value3 )
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%u %u %u", __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_UINT_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_uint_uint, name, \
+ TP_PROTO(unsigned int value1, unsigned int value2, \
+ unsigned int value3), \
+ TP_ARGS(value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_ull,
+ TP_PROTO(unsigned long long value1,
+ unsigned long long value2, unsigned long long value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned long long, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %llu %llu",
+ __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_ull, name, \
+ TP_PROTO(unsigned long long value1, unsigned long long value2, \
+ unsigned long long value3), \
+ TP_ARGS(value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_int_int_int,
+ TP_PROTO(unsigned long long ull, int value1, int value2, int value3),
+ TP_ARGS(ull, value1, value2, value3),
+ TP_STRUCT__entry(
+ __field( unsigned long long, ull )
+ __field( int, value1 )
+ __field( int, value2 )
+ __field( int, value3 )
+ ),
+ TP_fast_assign(
+ __entry->ull = ull;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %d %d %d",
+ __entry->ull, __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_int_int_int, name, \
+ TP_PROTO(unsigned long long ull, int value1, \
+ int value2, int value3), \
+ TP_ARGS(ull, value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint_uint,
+ TP_PROTO(unsigned long long ull, unsigned int value1,
+ unsigned int value2, unsigned int value3),
+ TP_ARGS(ull, value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ull)
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->ull = ull;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->ull, __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint_uint_uint, name, \
+ TP_PROTO(unsigned long long ull, unsigned int value1, \
+ unsigned int value2, unsigned int value3), \
+ TP_ARGS(ull, value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint_uint,
+ TP_PROTO(unsigned long long value1, unsigned long long value2,
+ unsigned int value3, unsigned int value4),
+ TP_ARGS(value1, value2, value3, value4),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned int, value3)
+ __field(unsigned int, value4)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ __entry->value4 = value4;
+ ),
+ TP_printk("%llu %llu %u %u",
+ __entry->value1, __entry->value2,
+ __entry->value3, __entry->value4)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_uint_uint, name, \
+ TP_PROTO(unsigned long long ull, unsigned long long ull1, \
+ unsigned int value2, unsigned int value3), \
+ TP_ARGS(ull, ull1, value2, value3))
+
+/* Trace events for fs/ocfs2/alloc.c. */
+DECLARE_EVENT_CLASS(ocfs2__btree_ops,
+	TP_PROTO(unsigned long long owner,
+ unsigned int value1, unsigned int value2),
+ TP_ARGS(owner, value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %u %u",
+ __entry->owner, __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_BTREE_EVENT(name) \
+DEFINE_EVENT(ocfs2__btree_ops, name, \
+ TP_PROTO(unsigned long long owner, \
+ unsigned int value1, unsigned int value2), \
+ TP_ARGS(owner, value1, value2))
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_adjust_rightmost_branch);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_rotate_tree_right);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_append_rec_to_path);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_insert_extent_start);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_add_clusters_in_btree);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_complete_edge_insert);
+
+TRACE_EVENT(ocfs2_grow_tree,
+ TP_PROTO(unsigned long long owner, int depth),
+ TP_ARGS(owner, depth),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(int, depth)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->depth = depth;
+ ),
+ TP_printk("%llu %d", __entry->owner, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_rotate_subtree,
+ TP_PROTO(int subtree_root, unsigned long long blkno,
+ int depth),
+ TP_ARGS(subtree_root, blkno, depth),
+ TP_STRUCT__entry(
+ __field(int, subtree_root)
+ __field(unsigned long long, blkno)
+ __field(int, depth)
+ ),
+ TP_fast_assign(
+ __entry->subtree_root = subtree_root;
+ __entry->blkno = blkno;
+ __entry->depth = depth;
+ ),
+ TP_printk("%d %llu %d", __entry->subtree_root,
+ __entry->blkno, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_insert_extent,
+ TP_PROTO(unsigned int ins_appending, unsigned int ins_contig,
+ int ins_contig_index, int free_records, int ins_tree_depth),
+ TP_ARGS(ins_appending, ins_contig, ins_contig_index, free_records,
+ ins_tree_depth),
+ TP_STRUCT__entry(
+ __field(unsigned int, ins_appending)
+ __field(unsigned int, ins_contig)
+ __field(int, ins_contig_index)
+ __field(int, free_records)
+ __field(int, ins_tree_depth)
+ ),
+ TP_fast_assign(
+ __entry->ins_appending = ins_appending;
+ __entry->ins_contig = ins_contig;
+ __entry->ins_contig_index = ins_contig_index;
+ __entry->free_records = free_records;
+ __entry->ins_tree_depth = ins_tree_depth;
+ ),
+ TP_printk("%u %u %d %d %d",
+ __entry->ins_appending, __entry->ins_contig,
+ __entry->ins_contig_index, __entry->free_records,
+ __entry->ins_tree_depth)
+);
+
+TRACE_EVENT(ocfs2_split_extent,
+ TP_PROTO(int split_index, unsigned int c_contig_type,
+ unsigned int c_has_empty_extent,
+ unsigned int c_split_covers_rec),
+ TP_ARGS(split_index, c_contig_type,
+ c_has_empty_extent, c_split_covers_rec),
+ TP_STRUCT__entry(
+ __field(int, split_index)
+ __field(unsigned int, c_contig_type)
+ __field(unsigned int, c_has_empty_extent)
+ __field(unsigned int, c_split_covers_rec)
+ ),
+ TP_fast_assign(
+ __entry->split_index = split_index;
+ __entry->c_contig_type = c_contig_type;
+ __entry->c_has_empty_extent = c_has_empty_extent;
+ __entry->c_split_covers_rec = c_split_covers_rec;
+ ),
+ TP_printk("%d %u %u %u", __entry->split_index, __entry->c_contig_type,
+ __entry->c_has_empty_extent, __entry->c_split_covers_rec)
+);
+
+TRACE_EVENT(ocfs2_remove_extent,
+ TP_PROTO(unsigned long long owner, unsigned int cpos,
+ unsigned int len, int index,
+ unsigned int e_cpos, unsigned int clusters),
+ TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(int, index)
+ __field(unsigned int, e_cpos)
+ __field(unsigned int, clusters)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->index = index;
+ __entry->e_cpos = e_cpos;
+ __entry->clusters = clusters;
+ ),
+ TP_printk("%llu %u %u %d %u %u",
+ __entry->owner, __entry->cpos, __entry->len, __entry->index,
+ __entry->e_cpos, __entry->clusters)
+);
+
+TRACE_EVENT(ocfs2_commit_truncate,
+ TP_PROTO(unsigned long long ino, unsigned int new_cpos,
+ unsigned int clusters, unsigned int depth),
+ TP_ARGS(ino, new_cpos, clusters, depth),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, new_cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, depth)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->new_cpos = new_cpos;
+ __entry->clusters = clusters;
+ __entry->depth = depth;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->ino, __entry->new_cpos,
+ __entry->clusters, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_validate_extent_block,
+ TP_PROTO(unsigned long long blkno),
+ TP_ARGS(blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ ),
+	TP_printk("%llu", __entry->blkno)
+);
+
+TRACE_EVENT(ocfs2_rotate_leaf,
+ TP_PROTO(unsigned int insert_cpos, int insert_index,
+ int has_empty, int next_free,
+ unsigned int l_count),
+ TP_ARGS(insert_cpos, insert_index, has_empty,
+ next_free, l_count),
+ TP_STRUCT__entry(
+ __field(unsigned int, insert_cpos)
+ __field(int, insert_index)
+ __field(int, has_empty)
+ __field(int, next_free)
+ __field(unsigned int, l_count)
+ ),
+ TP_fast_assign(
+ __entry->insert_cpos = insert_cpos;
+ __entry->insert_index = insert_index;
+ __entry->has_empty = has_empty;
+ __entry->next_free = next_free;
+ __entry->l_count = l_count;
+ ),
+ TP_printk("%u %d %d %d %u", __entry->insert_cpos,
+ __entry->insert_index, __entry->has_empty,
+ __entry->next_free, __entry->l_count)
+);
+
+TRACE_EVENT(ocfs2_add_clusters_in_btree_ret,
+ TP_PROTO(int status, int reason, int err),
+ TP_ARGS(status, reason, err),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, reason)
+ __field(int, err)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->reason = reason;
+ __entry->err = err;
+ ),
+ TP_printk("%d %d %d", __entry->status,
+ __entry->reason, __entry->err)
+);
+
+TRACE_EVENT(ocfs2_mark_extent_written,
+ TP_PROTO(unsigned long long owner, unsigned int cpos,
+ unsigned int len, unsigned int phys),
+ TP_ARGS(owner, cpos, len, phys),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(unsigned int, phys)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->phys = phys;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->owner, __entry->cpos,
+ __entry->len, __entry->phys)
+);
+
+DECLARE_EVENT_CLASS(ocfs2__truncate_log_ops,
+ TP_PROTO(unsigned long long blkno, int index,
+ unsigned int start, unsigned int num),
+ TP_ARGS(blkno, index, start, num),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __field(int, index)
+ __field(unsigned int, start)
+ __field(unsigned int, num)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __entry->index = index;
+ __entry->start = start;
+ __entry->num = num;
+ ),
+ TP_printk("%llu %d %u %u",
+ __entry->blkno, __entry->index,
+ __entry->start, __entry->num)
+);
+
+#define DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(name) \
+DEFINE_EVENT(ocfs2__truncate_log_ops, name, \
+ TP_PROTO(unsigned long long blkno, int index, \
+ unsigned int start, unsigned int num), \
+ TP_ARGS(blkno, index, start, num))
+
+DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_truncate_log_append);
+
+DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_replay_truncate_records);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_flush_truncate_log);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_begin_truncate_log_recovery);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_truncate_log_recovery_num);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_complete_truncate_log_recovery);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_free_cached_blocks);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_cache_cluster_dealloc);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_run_deallocs);
+
+TRACE_EVENT(ocfs2_cache_block_dealloc,
+ TP_PROTO(int type, int slot, unsigned long long suballoc,
+ unsigned long long blkno, unsigned int bit),
+ TP_ARGS(type, slot, suballoc, blkno, bit),
+ TP_STRUCT__entry(
+ __field(int, type)
+ __field(int, slot)
+ __field(unsigned long long, suballoc)
+ __field(unsigned long long, blkno)
+ __field(unsigned int, bit)
+ ),
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->slot = slot;
+ __entry->suballoc = suballoc;
+ __entry->blkno = blkno;
+ __entry->bit = bit;
+ ),
+ TP_printk("%d %d %llu %llu %u",
+ __entry->type, __entry->slot, __entry->suballoc,
+ __entry->blkno, __entry->bit)
+);
+
+/* End of trace events for fs/ocfs2/alloc.c. */
+
+/* Trace events for fs/ocfs2/localalloc.c. */
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_la_set_sizes);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_alloc_should_use_local);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_load_local_alloc);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_begin_local_alloc_recovery);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_reserve_local_alloc_bits);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_local_alloc_count_bits);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits_search_bitmap);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_sync_local_to_main);
+
+TRACE_EVENT(ocfs2_sync_local_to_main_free,
+ TP_PROTO(int count, int bit, unsigned long long start_blk,
+ unsigned long long blkno),
+ TP_ARGS(count, bit, start_blk, blkno),
+ TP_STRUCT__entry(
+ __field(int, count)
+ __field(int, bit)
+ __field(unsigned long long, start_blk)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->bit = bit;
+ __entry->start_blk = start_blk;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%d %d %llu %llu",
+ __entry->count, __entry->bit, __entry->start_blk,
+ __entry->blkno)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_new_window);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_local_alloc_new_window_result);
+
+/* End of trace events for fs/ocfs2/localalloc.c. */
+
+/* Trace events for fs/ocfs2/resize.c. */
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_update_last_group_and_inode);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_group_extend);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_group_add);
+
+/* End of trace events for fs/ocfs2/resize.c. */
+
+/* Trace events for fs/ocfs2/suballoc.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_group_descriptor);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_contig);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_discontig);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_block_group_alloc);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_nospc);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_no_new_group);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_reserve_new_inode_new_group);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_set_bits);
+
+TRACE_EVENT(ocfs2_relink_block_group,
+ TP_PROTO(unsigned long long i_blkno, unsigned int chain,
+ unsigned long long bg_blkno,
+ unsigned long long prev_blkno),
+ TP_ARGS(i_blkno, chain, bg_blkno, prev_blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, i_blkno)
+ __field(unsigned int, chain)
+ __field(unsigned long long, bg_blkno)
+ __field(unsigned long long, prev_blkno)
+ ),
+ TP_fast_assign(
+ __entry->i_blkno = i_blkno;
+ __entry->chain = chain;
+ __entry->bg_blkno = bg_blkno;
+ __entry->prev_blkno = prev_blkno;
+ ),
+ TP_printk("%llu %u %llu %llu",
+ __entry->i_blkno, __entry->chain, __entry->bg_blkno,
+ __entry->prev_blkno)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_cluster_group_search_wrong_max_bits);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cluster_group_search_max_block);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_block_group_search_max_block);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_search_chain_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_succ);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_end);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_claim_suballoc_bits);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_claim_new_inode_at_loc);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_clear_bits);
+
+TRACE_EVENT(ocfs2_free_suballoc_bits,
+ TP_PROTO(unsigned long long inode, unsigned long long group,
+ unsigned int start_bit, unsigned int count),
+ TP_ARGS(inode, group, start_bit, count),
+ TP_STRUCT__entry(
+ __field(unsigned long long, inode)
+ __field(unsigned long long, group)
+ __field(unsigned int, start_bit)
+ __field(unsigned int, count)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->group = group;
+ __entry->start_bit = start_bit;
+ __entry->count = count;
+ ),
+ TP_printk("%llu %llu %u %u", __entry->inode, __entry->group,
+ __entry->start_bit, __entry->count)
+);
+
+TRACE_EVENT(ocfs2_free_clusters,
+ TP_PROTO(unsigned long long bg_blkno, unsigned long long start_blk,
+ unsigned int start_bit, unsigned int count),
+ TP_ARGS(bg_blkno, start_blk, start_bit, count),
+ TP_STRUCT__entry(
+ __field(unsigned long long, bg_blkno)
+ __field(unsigned long long, start_blk)
+ __field(unsigned int, start_bit)
+ __field(unsigned int, count)
+ ),
+ TP_fast_assign(
+ __entry->bg_blkno = bg_blkno;
+ __entry->start_blk = start_blk;
+ __entry->start_bit = start_bit;
+ __entry->count = count;
+ ),
+ TP_printk("%llu %llu %u %u", __entry->bg_blkno, __entry->start_blk,
+ __entry->start_bit, __entry->count)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_get_suballoc_slot_bit);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_test_suballoc_bit);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_test_inode_bit);
+
+/* End of trace events for fs/ocfs2/suballoc.c. */
+
+/* Trace events for fs/ocfs2/refcounttree.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_refcount_block);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_refcount_trees);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree_blkno);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_change_refcount_rec);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_expand_inline_ref_root);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_divide_leaf_refcount_block);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_new_leaf_refcount_block);
+
+DECLARE_EVENT_CLASS(ocfs2__refcount_tree_ops,
+ TP_PROTO(unsigned long long blkno, int index,
+ unsigned long long cpos,
+ unsigned int clusters, unsigned int refcount),
+ TP_ARGS(blkno, index, cpos, clusters, refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __field(int, index)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, refcount)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __entry->index = index;
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->refcount = refcount;
+ ),
+ TP_printk("%llu %d %llu %u %u", __entry->blkno, __entry->index,
+ __entry->cpos, __entry->clusters, __entry->refcount)
+);
+
+#define DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(name) \
+DEFINE_EVENT(ocfs2__refcount_tree_ops, name, \
+ TP_PROTO(unsigned long long blkno, int index, \
+ unsigned long long cpos, \
+ unsigned int count, unsigned int refcount), \
+ TP_ARGS(blkno, index, cpos, count, refcount))
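+
+/*
+ * DECLARE_EVENT_CLASS() above factors the record layout, assignment and
+ * format string out of a whole family of events; each DEFINE_EVENT()
+ * (via the wrapper macro) then adds only a name and reuses the class
+ * body, which is much cheaper in generated code than a full
+ * TRACE_EVENT() apiece.
+ */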
+
+DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_insert_refcount_rec);
+
+TRACE_EVENT(ocfs2_split_refcount_rec,
+ TP_PROTO(unsigned long long cpos,
+ unsigned int clusters, unsigned int refcount,
+ unsigned long long split_cpos,
+ unsigned int split_clusters, unsigned int split_refcount),
+ TP_ARGS(cpos, clusters, refcount,
+ split_cpos, split_clusters, split_refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, refcount)
+ __field(unsigned long long, split_cpos)
+ __field(unsigned int, split_clusters)
+ __field(unsigned int, split_refcount)
+ ),
+ TP_fast_assign(
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->refcount = refcount;
+ __entry->split_cpos = split_cpos;
+ __entry->split_clusters = split_clusters;
+ __entry->split_refcount = split_refcount;
+ ),
+ TP_printk("%llu %u %u %llu %u %u",
+ __entry->cpos, __entry->clusters, __entry->refcount,
+ __entry->split_cpos, __entry->split_clusters,
+ __entry->split_refcount)
+);
+
+DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_split_refcount_rec_insert);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_increase_refcount_begin);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_change);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_increase_refcount_insert);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_split);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_remove_refcount_extent);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_restore_refcount_block);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_decrease_refcount_rec);
+
+TRACE_EVENT(ocfs2_decrease_refcount,
+ TP_PROTO(unsigned long long owner,
+ unsigned long long cpos,
+ unsigned int len, int delete),
+ TP_ARGS(owner, cpos, len, delete),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, len)
+ __field(int, delete)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->delete = delete;
+ ),
+ TP_printk("%llu %llu %u %d",
+ __entry->owner, __entry->cpos, __entry->len, __entry->delete)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_mark_extent_refcounted);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_calc_refcount_meta_credits);
+
+TRACE_EVENT(ocfs2_calc_refcount_meta_credits_iterate,
+ TP_PROTO(int recs_add, unsigned long long cpos,
+ unsigned int clusters, unsigned long long r_cpos,
+ unsigned int r_clusters, unsigned int refcount, int index),
+ TP_ARGS(recs_add, cpos, clusters, r_cpos, r_clusters, refcount, index),
+ TP_STRUCT__entry(
+ __field(int, recs_add)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned long long, r_cpos)
+ __field(unsigned int, r_clusters)
+ __field(unsigned int, refcount)
+ __field(int, index)
+ ),
+ TP_fast_assign(
+ __entry->recs_add = recs_add;
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->r_cpos = r_cpos;
+ __entry->r_clusters = r_clusters;
+ __entry->refcount = refcount;
+ __entry->index = index;
+ ),
+ TP_printk("%d %llu %u %llu %u %u %d",
+ __entry->recs_add, __entry->cpos, __entry->clusters,
+ __entry->r_cpos, __entry->r_clusters,
+ __entry->refcount, __entry->index)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_add_refcount_flag);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_prepare_refcount_change_for_del);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_lock_refcount_allocators);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_page);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_jbd);
+
+TRACE_EVENT(ocfs2_clear_ext_refcount,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int len, unsigned int p_cluster,
+ unsigned int ext_flags),
+ TP_ARGS(ino, cpos, len, p_cluster, ext_flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(unsigned int, p_cluster)
+ __field(unsigned int, ext_flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->p_cluster = p_cluster;
+ __entry->ext_flags = ext_flags;
+ ),
+ TP_printk("%llu %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->len,
+ __entry->p_cluster, __entry->ext_flags)
+);
+
+TRACE_EVENT(ocfs2_replace_clusters,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int old, unsigned int new, unsigned int len,
+ unsigned int ext_flags),
+ TP_ARGS(ino, cpos, old, new, len, ext_flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, old)
+ __field(unsigned int, new)
+ __field(unsigned int, len)
+ __field(unsigned int, ext_flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->old = old;
+ __entry->new = new;
+ __entry->len = len;
+ __entry->ext_flags = ext_flags;
+ ),
+ TP_printk("%llu %u %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->old, __entry->new,
+ __entry->len, __entry->ext_flags)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_make_clusters_writable);
+
+TRACE_EVENT(ocfs2_refcount_cow_hunk,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int write_len, unsigned int max_cpos,
+ unsigned int cow_start, unsigned int cow_len),
+ TP_ARGS(ino, cpos, write_len, max_cpos, cow_start, cow_len),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, write_len)
+ __field(unsigned int, max_cpos)
+ __field(unsigned int, cow_start)
+ __field(unsigned int, cow_len)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->write_len = write_len;
+ __entry->max_cpos = max_cpos;
+ __entry->cow_start = cow_start;
+ __entry->cow_len = cow_len;
+ ),
+ TP_printk("%llu %u %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->write_len,
+ __entry->max_cpos, __entry->cow_start, __entry->cow_len)
+);
+
+/* End of trace events for fs/ocfs2/refcounttree.c. */
+
+/* Trace events for fs/ocfs2/aops.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__get_block,
+ TP_PROTO(unsigned long long ino, unsigned long long iblock,
+ void *bh_result, int create),
+ TP_ARGS(ino, iblock, bh_result, create),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, iblock)
+ __field(void *, bh_result)
+ __field(int, create)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->iblock = iblock;
+ __entry->bh_result = bh_result;
+ __entry->create = create;
+ ),
+ TP_printk("%llu %llu %p %d",
+ __entry->ino, __entry->iblock,
+ __entry->bh_result, __entry->create)
+);
+
+#define DEFINE_OCFS2_GET_BLOCK_EVENT(name) \
+DEFINE_EVENT(ocfs2__get_block, name, \
+ TP_PROTO(unsigned long long ino, unsigned long long iblock, \
+ void *bh_result, int create), \
+ TP_ARGS(ino, iblock, bh_result, create))
+
+DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_symlink_get_block);
+
+DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_get_block);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap);
+
+TRACE_EVENT(ocfs2_try_to_write_inline_data,
+ TP_PROTO(unsigned long long ino, unsigned int len,
+ unsigned long long pos, unsigned int flags),
+ TP_ARGS(ino, len, pos, flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, len)
+ __field(unsigned long long, pos)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->len = len;
+ __entry->pos = pos;
+ __entry->flags = flags;
+ ),
+ TP_printk("%llu %u %llu 0x%x",
+ __entry->ino, __entry->len, __entry->pos, __entry->flags)
+);
+
+TRACE_EVENT(ocfs2_write_begin_nolock,
+ TP_PROTO(unsigned long long ino,
+ long long i_size, unsigned int i_clusters,
+ unsigned long long pos, unsigned int len,
+ unsigned int flags, void *page,
+ unsigned int clusters, unsigned int extents_to_split),
+ TP_ARGS(ino, i_size, i_clusters, pos, len, flags,
+ page, clusters, extents_to_split),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(long long, i_size)
+ __field(unsigned int, i_clusters)
+ __field(unsigned long long, pos)
+ __field(unsigned int, len)
+ __field(unsigned int, flags)
+ __field(void *, page)
+ __field(unsigned int, clusters)
+ __field(unsigned int, extents_to_split)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->i_size = i_size;
+ __entry->i_clusters = i_clusters;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->flags = flags;
+ __entry->page = page;
+ __entry->clusters = clusters;
+ __entry->extents_to_split = extents_to_split;
+ ),
+ TP_printk("%llu %lld %u %llu %u %u %p %u %u",
+ __entry->ino, __entry->i_size, __entry->i_clusters,
+ __entry->pos, __entry->len,
+ __entry->flags, __entry->page, __entry->clusters,
+ __entry->extents_to_split)
+);
+
+TRACE_EVENT(ocfs2_write_end_inline,
+ TP_PROTO(unsigned long long ino,
+ unsigned long long pos, unsigned int copied,
+ unsigned int id_count, unsigned int features),
+ TP_ARGS(ino, pos, copied, id_count, features),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, pos)
+ __field(unsigned int, copied)
+ __field(unsigned int, id_count)
+ __field(unsigned int, features)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->pos = pos;
+ __entry->copied = copied;
+ __entry->id_count = id_count;
+ __entry->features = features;
+ ),
+ TP_printk("%llu %llu %u %u %u",
+ __entry->ino, __entry->pos, __entry->copied,
+ __entry->id_count, __entry->features)
+);
+
+/* End of trace events for fs/ocfs2/aops.c. */
+
+/* Trace events for fs/ocfs2/mmap.c. */
+
+TRACE_EVENT(ocfs2_fault,
+ TP_PROTO(unsigned long long ino,
+ void *area, void *page, unsigned long pgoff),
+ TP_ARGS(ino, area, page, pgoff),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(void *, area)
+ __field(void *, page)
+ __field(unsigned long, pgoff)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->area = area;
+ __entry->page = page;
+ __entry->pgoff = pgoff;
+ ),
+ TP_printk("%llu %p %p %lu",
+ __entry->ino, __entry->area, __entry->page, __entry->pgoff)
+);
+
+/* End of trace events for fs/ocfs2/mmap.c. */
+
+/* Trace events for fs/ocfs2/file.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__file_ops,
+ TP_PROTO(void *inode, void *file, void *dentry,
+ unsigned long long ino,
+ unsigned int d_len, const unsigned char *d_name,
+ unsigned long long para),
+ TP_ARGS(inode, file, dentry, ino, d_len, d_name, para),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(void *, file)
+ __field(void *, dentry)
+ __field(unsigned long long, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, d_name)
+ __field(unsigned long long, para)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->file = file;
+ __entry->dentry = dentry;
+ __entry->ino = ino;
+ __entry->d_len = d_len;
+ __assign_str(d_name, d_name);
+ __entry->para = para;
+ ),
+ TP_printk("%p %p %p %llu %llu %.*s", __entry->inode, __entry->file,
+ __entry->dentry, __entry->ino, __entry->para,
+ __entry->d_len, __get_str(d_name))
+);
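+
+/*
+ * d_name above is a variable-length string: __string() reserves just
+ * enough ring-buffer space for it at trace time, __assign_str() copies
+ * it in, and __get_str() fetches it for TP_printk(), where the "%.*s"
+ * specifier consumes d_len and d_name together.
+ */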
+
+#define DEFINE_OCFS2_FILE_OPS(name) \
+DEFINE_EVENT(ocfs2__file_ops, name, \
+TP_PROTO(void *inode, void *file, void *dentry, \
+ unsigned long long ino, \
+ unsigned int d_len, const unsigned char *d_name, \
+ unsigned long long mode), \
+ TP_ARGS(inode, file, dentry, ino, d_len, d_name, mode))
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_open);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_truncate_file_error);
+
+TRACE_EVENT(ocfs2_extend_allocation,
+ TP_PROTO(unsigned long long ip_blkno, unsigned long long size,
+ unsigned int clusters, unsigned int clusters_to_add,
+ int why, int restart_func),
+ TP_ARGS(ip_blkno, size, clusters, clusters_to_add, why, restart_func),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ip_blkno)
+ __field(unsigned long long, size)
+ __field(unsigned int, clusters)
+ __field(unsigned int, clusters_to_add)
+ __field(int, why)
+ __field(int, restart_func)
+ ),
+ TP_fast_assign(
+ __entry->ip_blkno = ip_blkno;
+ __entry->size = size;
+ __entry->clusters = clusters;
+ __entry->clusters_to_add = clusters_to_add;
+ __entry->why = why;
+ __entry->restart_func = restart_func;
+ ),
+ TP_printk("%llu %llu %u %u %d %d",
+ __entry->ip_blkno, __entry->size, __entry->clusters,
+ __entry->clusters_to_add, __entry->why, __entry->restart_func)
+);
+
+TRACE_EVENT(ocfs2_extend_allocation_end,
+ TP_PROTO(unsigned long long ino,
+ unsigned int di_clusters, unsigned long long di_size,
+ unsigned int ip_clusters, unsigned long long i_size),
+ TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, di_clusters)
+ __field(unsigned long long, di_size)
+ __field(unsigned int, ip_clusters)
+ __field(unsigned long long, i_size)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->di_clusters = di_clusters;
+ __entry->di_size = di_size;
+ __entry->ip_clusters = ip_clusters;
+ __entry->i_size = i_size;
+ ),
+ TP_printk("%llu %u %llu %u %llu", __entry->ino, __entry->di_clusters,
+ __entry->di_size, __entry->ip_clusters, __entry->i_size)
+);
+
+TRACE_EVENT(ocfs2_write_zero_page,
+ TP_PROTO(unsigned long long ino,
+ unsigned long long abs_from, unsigned long long abs_to,
+ unsigned long index, unsigned int zero_from,
+ unsigned int zero_to),
+ TP_ARGS(ino, abs_from, abs_to, index, zero_from, zero_to),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, abs_from)
+ __field(unsigned long long, abs_to)
+ __field(unsigned long, index)
+ __field(unsigned int, zero_from)
+ __field(unsigned int, zero_to)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->abs_from = abs_from;
+ __entry->abs_to = abs_to;
+ __entry->index = index;
+ __entry->zero_from = zero_from;
+ __entry->zero_to = zero_to;
+ ),
+ TP_printk("%llu %llu %llu %lu %u %u", __entry->ino,
+ __entry->abs_from, __entry->abs_to,
+ __entry->index, __entry->zero_from, __entry->zero_to)
+);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend_range);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend);
+
+TRACE_EVENT(ocfs2_setattr,
+ TP_PROTO(void *inode, void *dentry,
+ unsigned long long ino,
+ unsigned int d_len, const unsigned char *d_name,
+ unsigned int ia_valid, unsigned int ia_mode,
+ unsigned int ia_uid, unsigned int ia_gid),
+ TP_ARGS(inode, dentry, ino, d_len, d_name,
+ ia_valid, ia_mode, ia_uid, ia_gid),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(void *, dentry)
+ __field(unsigned long long, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, d_name)
+ __field(unsigned int, ia_valid)
+ __field(unsigned int, ia_mode)
+ __field(unsigned int, ia_uid)
+ __field(unsigned int, ia_gid)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->dentry = dentry;
+ __entry->ino = ino;
+ __entry->d_len = d_len;
+ __assign_str(d_name, d_name);
+ __entry->ia_valid = ia_valid;
+ __entry->ia_mode = ia_mode;
+ __entry->ia_uid = ia_uid;
+ __entry->ia_gid = ia_gid;
+ ),
+ TP_printk("%p %p %llu %.*s %u %u %u %u", __entry->inode,
+ __entry->dentry, __entry->ino, __entry->d_len,
+ __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
+ __entry->ia_uid, __entry->ia_gid)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_write_remove_suid);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_partial_clusters);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range1);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range2);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range);
+
+TRACE_EVENT(ocfs2_prepare_inode_for_write,
+ TP_PROTO(unsigned long long ino, unsigned long long saved_pos,
+ int appending, unsigned long count,
+ int *direct_io, int *has_refcount),
+ TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, saved_pos)
+ __field(int, appending)
+ __field(unsigned long, count)
+ __field(int, direct_io)
+ __field(int, has_refcount)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->saved_pos = saved_pos;
+ __entry->appending = appending;
+ __entry->count = count;
+ __entry->direct_io = direct_io ? *direct_io : -1;
+ __entry->has_refcount = has_refcount ? *has_refcount : -1;
+ ),
+ TP_printk("%llu %llu %d %lu %d %d", __entry->ino,
+ __entry->saved_pos, __entry->appending, __entry->count,
+ __entry->direct_io, __entry->has_refcount)
+);
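+
+/*
+ * direct_io and has_refcount are passed as pointers so TP_fast_assign()
+ * can record -1 when a caller hands in NULL instead of dereferencing
+ * it; TP_fast_assign() only runs once the event is enabled, so the
+ * check stays off the disabled fast path.
+ */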
+
+DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+
+/* End of trace events for fs/ocfs2/file.c. */
+
+/* Trace events for fs/ocfs2/inode.c. */
+
+TRACE_EVENT(ocfs2_iget_begin,
+ TP_PROTO(unsigned long long ino, unsigned int flags, int sysfile_type),
+ TP_ARGS(ino, flags, sysfile_type),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ __field(int, sysfile_type)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->flags = flags;
+ __entry->sysfile_type = sysfile_type;
+ ),
+ TP_printk("%llu %u %d", __entry->ino,
+ __entry->flags, __entry->sysfile_type)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked);
+
+TRACE_EVENT(ocfs2_iget_end,
+ TP_PROTO(void *inode, unsigned long long ino),
+ TP_ARGS(inode, ino),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ ),
+ TP_printk("%p %llu", __entry->inode, __entry->ino)
+);
+
+TRACE_EVENT(ocfs2_find_actor,
+ TP_PROTO(void *inode, unsigned long long ino,
+ void *args, unsigned long long fi_blkno),
+ TP_ARGS(inode, ino, args, fi_blkno),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ __field(void *, args)
+ __field(unsigned long long, fi_blkno)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->args = args;
+ __entry->fi_blkno = fi_blkno;
+ ),
+ TP_printk("%p %llu %p %llu", __entry->inode, __entry->ino,
+ __entry->args, __entry->fi_blkno)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_populate_inode);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block);
+
+TRACE_EVENT(ocfs2_inode_is_valid_to_delete,
+ TP_PROTO(void *task, void *dc_task, unsigned long long ino,
+ unsigned int flags),
+ TP_ARGS(task, dc_task, ino, flags),
+ TP_STRUCT__entry(
+ __field(void *, task)
+ __field(void *, dc_task)
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->task = task;
+ __entry->dc_task = dc_task;
+ __entry->ino = ino;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task,
+ __entry->ino, __entry->flags)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_query_inode_wipe_begin);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_query_inode_wipe_succ);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_query_inode_wipe_end);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_cleanup_delete_inode);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode);
+
+TRACE_EVENT(ocfs2_inode_revalidate,
+ TP_PROTO(void *inode, unsigned long long ino,
+ unsigned int flags),
+ TP_ARGS(inode, ino, flags),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %llu %u", __entry->inode, __entry->ino, __entry->flags)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_mark_inode_dirty);
+
+/* End of trace events for fs/ocfs2/inode.c. */
+
+/* Trace events for fs/ocfs2/extent_map.c. */
+
+TRACE_EVENT(ocfs2_read_virt_blocks,
+ TP_PROTO(void *inode, unsigned long long vblock, int nr,
+ void *bhs, unsigned int flags, void *validate),
+ TP_ARGS(inode, vblock, nr, bhs, flags, validate),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, vblock)
+ __field(int, nr)
+ __field(void *, bhs)
+ __field(unsigned int, flags)
+ __field(void *, validate)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->vblock = vblock;
+ __entry->nr = nr;
+ __entry->bhs = bhs;
+ __entry->flags = flags;
+ __entry->validate = validate;
+ ),
+ TP_printk("%p %llu %d %p %x %p", __entry->inode, __entry->vblock,
+ __entry->nr, __entry->bhs, __entry->flags, __entry->validate)
+);
+
+/* End of trace events for fs/ocfs2/extent_map.c. */
+
+/* Trace events for fs/ocfs2/slot_map.c. */
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_refresh_slot_info);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers_block);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_find_slot);
+
+/* End of trace events for fs/ocfs2/slot_map.c. */
+
+/* Trace events for fs/ocfs2/heartbeat.c. */
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_do_node_down);
+
+/* End of trace events for fs/ocfs2/heartbeat.c. */
+
+/* Trace events for fs/ocfs2/super.c. */
+
+TRACE_EVENT(ocfs2_remount,
+ TP_PROTO(unsigned long s_flags, unsigned long osb_flags, int flags),
+ TP_ARGS(s_flags, osb_flags, flags),
+ TP_STRUCT__entry(
+ __field(unsigned long, s_flags)
+ __field(unsigned long, osb_flags)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->s_flags = s_flags;
+ __entry->osb_flags = osb_flags;
+ __entry->flags = flags;
+ ),
+ TP_printk("%lu %lu %d", __entry->s_flags,
+ __entry->osb_flags, __entry->flags)
+);
+
+TRACE_EVENT(ocfs2_fill_super,
+ TP_PROTO(void *sb, void *data, int silent),
+ TP_ARGS(sb, data, silent),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, data)
+ __field(int, silent)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->data = data;
+ __entry->silent = silent;
+ ),
+ TP_printk("%p %p %d", __entry->sb,
+ __entry->data, __entry->silent)
+);
+
+TRACE_EVENT(ocfs2_parse_options,
+ TP_PROTO(int is_remount, char *options),
+ TP_ARGS(is_remount, options),
+ TP_STRUCT__entry(
+ __field(int, is_remount)
+ __string(options, options)
+ ),
+ TP_fast_assign(
+ __entry->is_remount = is_remount;
+ __assign_str(options, options);
+ ),
+ TP_printk("%d %s", __entry->is_remount, __get_str(options))
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super);
+
+TRACE_EVENT(ocfs2_statfs,
+ TP_PROTO(void *sb, void *buf),
+ TP_ARGS(sb, buf),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, buf)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->buf = buf;
+ ),
+ TP_printk("%p %p", __entry->sb, __entry->buf)
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_dismount_volume);
+
+TRACE_EVENT(ocfs2_initialize_super,
+ TP_PROTO(char *label, char *uuid_str, unsigned long long root_dir,
+ unsigned long long system_dir, int cluster_bits),
+ TP_ARGS(label, uuid_str, root_dir, system_dir, cluster_bits),
+ TP_STRUCT__entry(
+ __string(label, label)
+ __string(uuid_str, uuid_str)
+ __field(unsigned long long, root_dir)
+ __field(unsigned long long, system_dir)
+ __field(int, cluster_bits)
+ ),
+ TP_fast_assign(
+ __assign_str(label, label);
+ __assign_str(uuid_str, uuid_str);
+ __entry->root_dir = root_dir;
+ __entry->system_dir = system_dir;
+ __entry->cluster_bits = cluster_bits;
+ ),
+ TP_printk("%s %s %llu %llu %d", __get_str(label), __get_str(uuid_str),
+ __entry->root_dir, __entry->system_dir, __entry->cluster_bits)
+);
+
+/* End of trace events for fs/ocfs2/super.c. */
+
+/* Trace events for fs/ocfs2/xattr.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_xattr_block);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_xattr_extend_allocation);
+
+TRACE_EVENT(ocfs2_init_xattr_set_ctxt,
+ TP_PROTO(const char *name, int meta, int clusters, int credits),
+ TP_ARGS(name, meta, clusters, credits),
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, meta)
+ __field(int, clusters)
+ __field(int, credits)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->meta = meta;
+ __entry->clusters = clusters;
+ __entry->credits = credits;
+ ),
+ TP_printk("%s %d %d %d", __get_str(name), __entry->meta,
+ __entry->clusters, __entry->credits)
+);
+
+DECLARE_EVENT_CLASS(ocfs2__xattr_find,
+ TP_PROTO(unsigned long long ino, const char *name, int name_index,
+ unsigned int hash, unsigned long long location,
+ int xe_index),
+ TP_ARGS(ino, name, name_index, hash, location, xe_index),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __string(name, name)
+ __field(int, name_index)
+ __field(unsigned int, hash)
+ __field(unsigned long long, location)
+ __field(int, xe_index)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __assign_str(name, name);
+ __entry->name_index = name_index;
+ __entry->hash = hash;
+ __entry->location = location;
+ __entry->xe_index = xe_index;
+ ),
+ TP_printk("%llu %s %d %u %llu %d", __entry->ino, __get_str(name),
+ __entry->name_index, __entry->hash, __entry->location,
+ __entry->xe_index)
+);
+
+#define DEFINE_OCFS2_XATTR_FIND_EVENT(name) \
+DEFINE_EVENT(ocfs2__xattr_find, name, \
+TP_PROTO(unsigned long long ino, const char *name, int name_index, \
+ unsigned int hash, unsigned long long bucket, \
+ int xe_index), \
+ TP_ARGS(ino, name, name_index, hash, bucket, xe_index))
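+
+/*
+ * The class names this argument "location" while the wrapper above
+ * calls it "bucket"; TP_ARGS() forwards arguments positionally, so the
+ * mismatch is purely cosmetic.
+ */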
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_bucket_find);
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find);
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find_rec);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_iterate_xattr_buckets);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_iterate_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cp_xattr_block_to_bucket_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cp_xattr_block_to_bucket_end);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block_begin);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_defrag_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_bucket_cross_cluster);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_divide_xattr_bucket_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_divide_xattr_bucket_move);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_cp_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_buckets);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_adjust_xattr_cross_cluster);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_add_new_xattr_cluster);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_insert);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_extend_xattr_bucket);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_add_new_xattr_bucket);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_xattr_bucket_value_truncate);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_rm_xattr_cluster);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_header);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_create_empty_xattr_block);
+
+DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_bucket);
+
+DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_index_block);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_xattr_bucket_value_refcount);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_reflink_xattr_buckets);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_rec);
+
+/* End of trace events for fs/ocfs2/xattr.c. */
+
+/* Trace events for fs/ocfs2/reservations.c. */
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_insert);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_begin);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_end);
+
+TRACE_EVENT(ocfs2_resv_find_window_begin,
+ TP_PROTO(unsigned int r_start, unsigned int r_end, unsigned int goal,
+ unsigned int wanted, int empty_root),
+ TP_ARGS(r_start, r_end, goal, wanted, empty_root),
+ TP_STRUCT__entry(
+ __field(unsigned int, r_start)
+ __field(unsigned int, r_end)
+ __field(unsigned int, goal)
+ __field(unsigned int, wanted)
+ __field(int, empty_root)
+ ),
+ TP_fast_assign(
+ __entry->r_start = r_start;
+ __entry->r_end = r_end;
+ __entry->goal = goal;
+ __entry->wanted = wanted;
+ __entry->empty_root = empty_root;
+ ),
+ TP_printk("%u %u %u %u %d", __entry->r_start, __entry->r_end,
+ __entry->goal, __entry->wanted, __entry->empty_root)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_find_window_prev);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_resv_find_window_next);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cannibalize_resv_begin);
+
+TRACE_EVENT(ocfs2_cannibalize_resv_end,
+ TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(start, end, len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, start)
+ __field(unsigned int, end)
+ __field(unsigned int, len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->len = len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
+ __entry->len, __entry->last_start, __entry->last_len)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_resv_bits);
+
+TRACE_EVENT(ocfs2_resmap_claimed_bits_begin,
+ TP_PROTO(unsigned int cstart, unsigned int cend, unsigned int clen,
+ unsigned int r_start, unsigned int r_end, unsigned int r_len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(cstart, cend, clen, r_start, r_end,
+ r_len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, cstart)
+ __field(unsigned int, cend)
+ __field(unsigned int, clen)
+ __field(unsigned int, r_start)
+ __field(unsigned int, r_end)
+ __field(unsigned int, r_len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->cstart = cstart;
+ __entry->cend = cend;
+ __entry->clen = clen;
+ __entry->r_start = r_start;
+ __entry->r_end = r_end;
+ __entry->r_len = r_len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u %u %u %u",
+ __entry->cstart, __entry->cend, __entry->clen,
+ __entry->r_start, __entry->r_end, __entry->r_len,
+ __entry->last_start, __entry->last_len)
+);
+
+TRACE_EVENT(ocfs2_resmap_claimed_bits_end,
+ TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(start, end, len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, start)
+ __field(unsigned int, end)
+ __field(unsigned int, len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->len = len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
+ __entry->len, __entry->last_start, __entry->last_len)
+);
+
+/* End of trace events for fs/ocfs2/reservations.c. */
+
+/* Trace events for fs/ocfs2/quota_local.c. */
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_recover_local_quota_file);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_finish_quota_recovery);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(olq_set_dquot);
+
+/* End of trace events for fs/ocfs2/quota_local.c. */
+
+/* Trace events for fs/ocfs2/quota_global.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_quota_block);
+
+TRACE_EVENT(ocfs2_sync_dquot,
+ TP_PROTO(unsigned int dq_id, long long dqb_curspace,
+ long long spacechange, long long curinodes,
+ long long inodechange),
+ TP_ARGS(dq_id, dqb_curspace, spacechange, curinodes, inodechange),
+ TP_STRUCT__entry(
+ __field(unsigned int, dq_id)
+ __field(long long, dqb_curspace)
+ __field(long long, spacechange)
+ __field(long long, curinodes)
+ __field(long long, inodechange)
+ ),
+ TP_fast_assign(
+ __entry->dq_id = dq_id;
+ __entry->dqb_curspace = dqb_curspace;
+ __entry->spacechange = spacechange;
+ __entry->curinodes = curinodes;
+ __entry->inodechange = inodechange;
+ ),
+ TP_printk("%u %lld %lld %lld %lld", __entry->dq_id,
+ __entry->dqb_curspace, __entry->spacechange,
+ __entry->curinodes, __entry->inodechange)
+);
+
+TRACE_EVENT(ocfs2_sync_dquot_helper,
+ TP_PROTO(unsigned int dq_id, unsigned int dq_type, unsigned long type,
+ const char *s_id),
+ TP_ARGS(dq_id, dq_type, type, s_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, dq_id)
+ __field(unsigned int, dq_type)
+ __field(unsigned long, type)
+ __string(s_id, s_id)
+ ),
+ TP_fast_assign(
+ __entry->dq_id = dq_id;
+ __entry->dq_type = dq_type;
+ __entry->type = type;
+ __assign_str(s_id, s_id);
+ ),
+ TP_printk("%u %u %lu %s", __entry->dq_id, __entry->dq_type,
+ __entry->type, __get_str(s_id))
+);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_write_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty);
+
+/* End of trace events for fs/ocfs2/quota_global.c. */
+
+/* Trace events for fs/ocfs2/dir.c. */
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_search_dirblock);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_dir_block);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_find_entry_el);
+
+TRACE_EVENT(ocfs2_dx_dir_search,
+ TP_PROTO(unsigned long long ino, int namelen, const char *name,
+ unsigned int major_hash, unsigned int minor_hash,
+ unsigned long long blkno),
+ TP_ARGS(ino, namelen, name, major_hash, minor_hash, blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(int, namelen)
+ __string(name, name)
+ __field(unsigned int, major_hash)
+ __field(unsigned int, minor_hash)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->major_hash = major_hash;
+ __entry->minor_hash = minor_hash;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%llu %.*s %u %u %llu", __entry->ino,
+ __entry->namelen, __get_str(name),
+ __entry->major_hash, __entry->minor_hash, __entry->blkno)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_dx_dir_search_leaf_info);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_delete_entry_dx);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_readdir);
+
+TRACE_EVENT(ocfs2_find_files_on_disk,
+ TP_PROTO(int namelen, const char *name, void *blkno,
+ unsigned long long dir),
+ TP_ARGS(namelen, name, blkno, dir),
+ TP_STRUCT__entry(
+ __field(int, namelen)
+ __string(name, name)
+ __field(void *, blkno)
+ __field(unsigned long long, dir)
+ ),
+ TP_fast_assign(
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->blkno = blkno;
+ __entry->dir = dir;
+ ),
+ TP_printk("%.*s %p %llu", __entry->namelen, __get_str(name),
+ __entry->blkno, __entry->dir)
+);
+
+TRACE_EVENT(ocfs2_check_dir_for_entry,
+ TP_PROTO(unsigned long long dir, int namelen, const char *name),
+ TP_ARGS(dir, namelen, name),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __field(int, namelen)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ ),
+ TP_printk("%llu %.*s", __entry->dir,
+ __entry->namelen, __get_str(name))
+);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_dx_dir_attach_index);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_format_cluster);
+
+TRACE_EVENT(ocfs2_dx_dir_index_root_block,
+ TP_PROTO(unsigned long long dir,
+ unsigned int major_hash, unsigned int minor_hash,
+ int namelen, const char *name, unsigned int num_used),
+ TP_ARGS(dir, major_hash, minor_hash, namelen, name, num_used),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __field(unsigned int, major_hash)
+ __field(unsigned int, minor_hash)
+ __field(int, namelen)
+ __string(name, name)
+ __field(unsigned int, num_used)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->major_hash = major_hash;
+ __entry->minor_hash = minor_hash;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->num_used = num_used;
+ ),
+ TP_printk("%llu %x %x %.*s %u", __entry->dir,
+ __entry->major_hash, __entry->minor_hash,
+ __entry->namelen, __get_str(name), __entry->num_used)
+);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_extend_dir);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_rebalance);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_dx_dir_rebalance_split);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_prepare_dir_for_insert);
+
+/* End of trace events for fs/ocfs2/dir.c. */
+
+/* Trace events for fs/ocfs2/namei.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__dentry_ops,
+ TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
+ unsigned long long dir_blkno, unsigned long long extra),
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(unsigned long long, dir_blkno)
+ __field(unsigned long long, extra)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->dir_blkno = dir_blkno;
+ __entry->extra = extra;
+ ),
+ TP_printk("%p %p %.*s %llu %llu", __entry->dir, __entry->dentry,
+ __entry->name_len, __get_str(name),
+ __entry->dir_blkno, __entry->extra)
+);
+
+#define DEFINE_OCFS2_DENTRY_OPS(name) \
+DEFINE_EVENT(ocfs2__dentry_ops, name, \
+TP_PROTO(void *dir, void *dentry, int name_len, const char *name, \
+ unsigned long long dir_blkno, unsigned long long extra), \
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra))
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_lookup);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_mkdir);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_create);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_unlink);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_symlink_create);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_mv_orphaned_inode_to_new);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_lookup_ret);
+
+TRACE_EVENT(ocfs2_mknod,
+ TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
+ unsigned long long dir_blkno, unsigned long dev, int mode),
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, dev, mode),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(unsigned long long, dir_blkno)
+ __field(unsigned long, dev)
+ __field(int, mode)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->dir_blkno = dir_blkno;
+ __entry->dev = dev;
+ __entry->mode = mode;
+ ),
+ TP_printk("%p %p %.*s %llu %lu %d", __entry->dir, __entry->dentry,
+ __entry->name_len, __get_str(name),
+ __entry->dir_blkno, __entry->dev, __entry->mode)
+);
+
+TRACE_EVENT(ocfs2_link,
+ TP_PROTO(unsigned long long ino, int old_len, const char *old_name,
+ int name_len, const char *name),
+ TP_ARGS(ino, old_len, old_name, name_len, name),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(int, old_len)
+ __string(old_name, old_name)
+ __field(int, name_len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->old_len = old_len;
+ __assign_str(old_name, old_name);
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%llu %.*s %.*s", __entry->ino,
+ __entry->old_len, __get_str(old_name),
+ __entry->name_len, __get_str(name))
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_unlink_noent);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock_end);
+
+TRACE_EVENT(ocfs2_rename,
+ TP_PROTO(void *old_dir, void *old_dentry,
+ void *new_dir, void *new_dentry,
+ int old_len, const char *old_name,
+ int new_len, const char *new_name),
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry,
+ old_len, old_name, new_len, new_name),
+ TP_STRUCT__entry(
+ __field(void *, old_dir)
+ __field(void *, old_dentry)
+ __field(void *, new_dir)
+ __field(void *, new_dentry)
+ __field(int, old_len)
+ __string(old_name, old_name)
+ __field(int, new_len)
+ __string(new_name, new_name)
+ ),
+ TP_fast_assign(
+ __entry->old_dir = old_dir;
+ __entry->old_dentry = old_dentry;
+ __entry->new_dir = new_dir;
+ __entry->new_dentry = new_dentry;
+ __entry->old_len = old_len;
+ __assign_str(old_name, old_name);
+ __entry->new_len = new_len;
+ __assign_str(new_name, new_name);
+ ),
+ TP_printk("%p %p %p %p %.*s %.*s",
+ __entry->old_dir, __entry->old_dentry,
+ __entry->new_dir, __entry->new_dentry,
+ __entry->old_len, __get_str(old_name),
+ __entry->new_len, __get_str(new_name))
+);
+
+TRACE_EVENT(ocfs2_rename_target_exists,
+ TP_PROTO(int new_len, const char *new_name),
+ TP_ARGS(new_len, new_name),
+ TP_STRUCT__entry(
+ __field(int, new_len)
+ __string(new_name, new_name)
+ ),
+ TP_fast_assign(
+ __entry->new_len = new_len;
+ __assign_str(new_name, new_name);
+ ),
+ TP_printk("%.*s", __entry->new_len, __get_str(new_name))
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_rename_disagree);
+
+TRACE_EVENT(ocfs2_rename_over_existing,
+ TP_PROTO(unsigned long long new_blkno, void *new_bh,
+ unsigned long long newdi_blkno),
+ TP_ARGS(new_blkno, new_bh, newdi_blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, new_blkno)
+ __field(void *, new_bh)
+ __field(unsigned long long, newdi_blkno)
+ ),
+ TP_fast_assign(
+ __entry->new_blkno = new_blkno;
+ __entry->new_bh = new_bh;
+ __entry->newdi_blkno = newdi_blkno;
+ ),
+ TP_printk("%llu %p %llu", __entry->new_blkno, __entry->new_bh,
+ __entry->newdi_blkno)
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_create_symlink_data);
+
+TRACE_EVENT(ocfs2_symlink_begin,
+ TP_PROTO(void *dir, void *dentry, const char *symname,
+ int len, const char *name),
+ TP_ARGS(dir, dentry, symname, len, name),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(const char *, symname)
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->symname = symname;
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%p %p %s %.*s", __entry->dir, __entry->dentry,
+ __entry->symname, __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_blkno_stringify,
+ TP_PROTO(unsigned long long blkno, const char *name, int namelen),
+ TP_ARGS(blkno, name, namelen),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __string(name, name)
+ __field(int, namelen)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __assign_str(name, name);
+ __entry->namelen = namelen;
+ ),
+ TP_printk("%llu %s %d", __entry->blkno, __get_str(name),
+ __entry->namelen)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_add_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_orphan_add_end);
+
+TRACE_EVENT(ocfs2_orphan_del,
+ TP_PROTO(unsigned long long dir, const char *name, int namelen),
+ TP_ARGS(dir, name, namelen),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __string(name, name)
+ __field(int, namelen)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __assign_str(name, name);
+ __entry->namelen = namelen;
+ ),
+ TP_printk("%llu %s %d", __entry->dir, __get_str(name),
+ __entry->namelen)
+);
+
+/* End of trace events for fs/ocfs2/namei.c. */
+
+/* Trace events for fs/ocfs2/dcache.c. */
+
+TRACE_EVENT(ocfs2_dentry_revalidate,
+ TP_PROTO(void *dentry, int len, const char *name),
+ TP_ARGS(dentry, len, name),
+ TP_STRUCT__entry(
+ __field(void *, dentry)
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dentry = dentry;
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%p %.*s", __entry->dentry, __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_dentry_revalidate_negative,
+ TP_PROTO(int len, const char *name, unsigned long pgen,
+ unsigned long gen),
+ TP_ARGS(len, name, pgen, gen),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long, pgen)
+ __field(unsigned long, gen)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->pgen = pgen;
+ __entry->gen = gen;
+ ),
+ TP_printk("%.*s %lu %lu", __entry->len, __get_str(name),
+ __entry->pgen, __entry->gen)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_delete);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_dentry_revalidate_orphaned);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_nofsdata);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_dentry_revalidate_ret);
+
+TRACE_EVENT(ocfs2_find_local_alias,
+ TP_PROTO(int len, const char *name),
+ TP_ARGS(len, name),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%.*s", __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_dentry_attach_lock,
+ TP_PROTO(int len, const char *name,
+ unsigned long long parent, void *fsdata),
+ TP_ARGS(len, name, parent, fsdata),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long long, parent)
+ __field(void *, fsdata)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->parent = parent;
+ __entry->fsdata = fsdata;
+ ),
+ TP_printk("%.*s %llu %p", __entry->len, __get_str(name),
+ __entry->parent, __entry->fsdata)
+);
+
+TRACE_EVENT(ocfs2_dentry_attach_lock_found,
+ TP_PROTO(const char *name, unsigned long long parent,
+ unsigned long long ino),
+ TP_ARGS(name, parent, ino),
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long long, parent)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->parent = parent;
+ __entry->ino = ino;
+ ),
+ TP_printk("%s %llu %llu", __get_str(name), __entry->parent, __entry->ino)
+);
+
+/* End of trace events for fs/ocfs2/dcache.c. */
+
+/* Trace events for fs/ocfs2/export.c. */
+
+TRACE_EVENT(ocfs2_get_dentry_begin,
+ TP_PROTO(void *sb, void *handle, unsigned long long blkno),
+ TP_ARGS(sb, handle, blkno),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, handle)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->handle = handle;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%p %p %llu", __entry->sb, __entry->handle, __entry->blkno)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_get_dentry_test_bit);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_get_dentry_stale);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_get_dentry_generation);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_dentry_end);
+
+TRACE_EVENT(ocfs2_get_parent,
+ TP_PROTO(void *child, int len, const char *name,
+ unsigned long long ino),
+ TP_ARGS(child, len, name, ino),
+ TP_STRUCT__entry(
+ __field(void *, child)
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->ino = ino;
+ ),
+ TP_printk("%p %.*s %llu", __entry->child, __entry->len,
+ __get_str(name), __entry->ino)
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_parent_end);
+
+TRACE_EVENT(ocfs2_encode_fh_begin,
+ TP_PROTO(void *dentry, int name_len, const char *name,
+ void *fh, int len, int connectable),
+ TP_ARGS(dentry, name_len, name, fh, len, connectable),
+ TP_STRUCT__entry(
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(void *, fh)
+ __field(int, len)
+ __field(int, connectable)
+ ),
+ TP_fast_assign(
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->fh = fh;
+ __entry->len = len;
+ __entry->connectable = connectable;
+ ),
+ TP_printk("%p %.*s %p %d %d", __entry->dentry, __entry->name_len,
+ __get_str(name), __entry->fh, __entry->len,
+ __entry->connectable)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_self);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_parent);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_encode_fh_type);
+
+/* End of trace events for fs/ocfs2/export.c. */
+
+/* Trace events for fs/ocfs2/journal.c. */
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_commit_cache_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_journal_init);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_journal_init_maxlen);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_journal_shutdown);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_journal_shutdown_wait);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_complete_recovery);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_complete_recovery_end);
+
+TRACE_EVENT(ocfs2_complete_recovery_slot,
+ TP_PROTO(int slot, unsigned long long la_ino,
+ unsigned long long tl_ino, void *qrec),
+ TP_ARGS(slot, la_ino, tl_ino, qrec),
+ TP_STRUCT__entry(
+ __field(int, slot)
+ __field(unsigned long long, la_ino)
+ __field(unsigned long long, tl_ino)
+ __field(void *, qrec)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->la_ino = la_ino;
+ __entry->tl_ino = tl_ino;
+ __entry->qrec = qrec;
+ ),
+ TP_printk("%d %llu %llu %p", __entry->slot, __entry->la_ino,
+ __entry->tl_ino, __entry->qrec)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_recovery_thread_node);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_recovery_thread_end);
+
+TRACE_EVENT(ocfs2_recovery_thread,
+ TP_PROTO(int node_num, int osb_node_num, int disable,
+ void *recovery_thread, int map_set),
+ TP_ARGS(node_num, osb_node_num, disable, recovery_thread, map_set),
+ TP_STRUCT__entry(
+ __field(int, node_num)
+ __field(int, osb_node_num)
+ __field(int, disable)
+ __field(void *, recovery_thread)
+ __field(int, map_set)
+ ),
+ TP_fast_assign(
+ __entry->node_num = node_num;
+ __entry->osb_node_num = osb_node_num;
+ __entry->disable = disable;
+ __entry->recovery_thread = recovery_thread;
+ __entry->map_set = map_set;
+ ),
+ TP_printk("%d %d %d %p %d", __entry->node_num,
+ __entry->osb_node_num, __entry->disable,
+ __entry->recovery_thread, __entry->map_set)
+);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_replay_journal_recovered);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_lock_err);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_skip);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_recover_node);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_recover_node_skip);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_mark_dead_nodes);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_end);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_filldir);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_recover_orphans);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_recover_orphans_iput);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_wait_on_mount);
+
+/* End of trace events for fs/ocfs2/journal.c. */
+
+/* Trace events for fs/ocfs2/buffer_head_io.c. */
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_read_blocks_sync);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_read_blocks_sync_jbd);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_read_blocks_from_disk);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_bh);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_end);
+
+TRACE_EVENT(ocfs2_write_block,
+ TP_PROTO(unsigned long long block, void *ci),
+ TP_ARGS(block, ci),
+ TP_STRUCT__entry(
+ __field(unsigned long long, block)
+ __field(void *, ci)
+ ),
+ TP_fast_assign(
+ __entry->block = block;
+ __entry->ci = ci;
+ ),
+ TP_printk("%llu %p", __entry->block, __entry->ci)
+);
+
+TRACE_EVENT(ocfs2_read_blocks_begin,
+ TP_PROTO(void *ci, unsigned long long block,
+ unsigned int nr, int flags),
+ TP_ARGS(ci, block, nr, flags),
+ TP_STRUCT__entry(
+ __field(void *, ci)
+ __field(unsigned long long, block)
+ __field(unsigned int, nr)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->ci = ci;
+ __entry->block = block;
+ __entry->nr = nr;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %llu %u %d", __entry->ci, __entry->block,
+ __entry->nr, __entry->flags)
+);
+
+/* End of trace events for fs/ocfs2/buffer_head_io.c. */
+
+/* Trace events for fs/ocfs2/uptodate.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_copied_metadata_tree);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_metadata_cache_purge);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_buffer_cached_begin);
+
+TRACE_EVENT(ocfs2_buffer_cached_end,
+ TP_PROTO(int index, void *item),
+ TP_ARGS(index, item),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(void *, item)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->item = item;
+ ),
+ TP_printk("%d %p", __entry->index, __entry->item)
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_append_cache_array);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_insert_cache_tree);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_expand_cache);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_set_buffer_uptodate);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_set_buffer_uptodate_begin);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_remove_metadata_array);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_remove_metadata_tree);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_remove_block_from_cache);
+
+/* End of trace events for fs/ocfs2/uptodate.c. */
+#endif /* _TRACE_OCFS2_H */
+
+/* This part must be outside protection */
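+/*
+ * When one .c file defines CREATE_TRACE_POINTS before including this
+ * header, <trace/define_trace.h> re-reads it to emit the actual event
+ * code; TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell it where the
+ * header lives, and that second pass is why these lines must sit
+ * outside the _TRACE_OCFS2_H guard.
+ */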
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE ocfs2_trace
+#include <trace/define_trace.h>
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index a73f641..92fcd57 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -11,7 +11,6 @@
#include <linux/writeback.h>
#include <linux/workqueue.h>
-#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
@@ -27,6 +26,7 @@
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
+#include "ocfs2_trace.h"
/*
* Locking of quotas with OCFS2 is rather complex. Here are rules that
@@ -130,8 +130,7 @@ int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
struct ocfs2_disk_dqtrailer *dqt =
ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
- mlog(0, "Validating quota block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -341,8 +340,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
u64 pcount;
int status;
- mlog_entry_void();
-
/* Read global header */
gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
OCFS2_INVALID_SLOT);
@@ -402,7 +399,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
@@ -508,9 +506,10 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
olditime = dquot->dq_dqb.dqb_itime;
oldbtime = dquot->dq_dqb.dqb_btime;
ocfs2_global_disk2memdqb(dquot, &dqblk);
- mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
- dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
- dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
+ trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace,
+ (long long)spacechange,
+ dquot->dq_dqb.dqb_curinodes,
+ (long long)inodechange);
if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
dquot->dq_dqb.dqb_curspace += spacechange;
if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
@@ -557,7 +556,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
spin_unlock(&dq_data_lock);
err = ocfs2_qinfo_lock(info, freeing);
if (err < 0) {
- mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
+ mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
" (type=%d, id=%u)\n", dquot->dq_type,
(unsigned)dquot->dq_id);
goto out;
@@ -594,8 +593,8 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
struct ocfs2_super *osb = OCFS2_SB(sb);
int status = 0;
- mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
- dquot->dq_type, type, sb->s_id);
+ trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type,
+ type, sb->s_id);
if (type != dquot->dq_type)
goto out;
status = ocfs2_lock_global_qf(oinfo, 1);
@@ -621,7 +620,6 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
return status;
}
@@ -647,7 +645,7 @@ static int ocfs2_write_dquot(struct dquot *dquot)
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
- mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
+ trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);
handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
if (IS_ERR(handle)) {
@@ -660,7 +658,6 @@ static int ocfs2_write_dquot(struct dquot *dquot)
mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out:
- mlog_exit(status);
return status;
}
@@ -686,7 +683,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
- mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
+ trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type);
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
@@ -722,7 +719,8 @@ out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
mutex_unlock(&dquot->dq_lock);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -743,7 +741,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
int need_alloc = ocfs2_global_qinit_alloc(sb, type);
handle_t *handle;
- mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+ trace_ocfs2_acquire_dquot(dquot->dq_id, type);
mutex_lock(&dquot->dq_lock);
/*
* We need an exclusive lock, because we're going to update use count
@@ -809,7 +807,8 @@ out_dq:
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
mutex_unlock(&dquot->dq_lock);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -829,7 +828,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(sb);
- mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+ trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type);
/* In case user set some limits, sync dquot immediately to global
* quota file so that information propagates quicker */
@@ -866,7 +865,8 @@ out_dlock:
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -877,8 +877,6 @@ static int ocfs2_write_info(struct super_block *sb, int type)
int status = 0;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
- mlog_entry_void();
-
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
@@ -893,7 +891,8 @@ static int ocfs2_write_info(struct super_block *sb, int type)
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
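The conversion applied in this file, and repeated in the files below, is mechanical: the mlog_entry*() call at the top of a function becomes a tracepoint, and the unconditional mlog_exit(status) at the bottom narrows to reporting only genuine failures. Schematically, for a hypothetical function:

        /* before */
        int ocfs2_frob(unsigned int id, int type)
        {
                int status = 0;

                mlog_entry("id=%u, type=%d", id, type);
                /* ... work that may set status ... */
                mlog_exit(status);
                return status;
        }

        /* after */
        int ocfs2_frob(unsigned int id, int type)
        {
                int status = 0;

                trace_ocfs2_frob(id, type);
                /* ... work that may set status ... */
                if (status)
                        mlog_errno(status);
                return status;
        }

The net effect is that success paths generate no log traffic at all, while failures still land in the masklog, with file and line information, via mlog_errno().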
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc78764..dc8007f 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -8,7 +8,6 @@
#include <linux/quotaops.h>
#include <linux/module.h>
-#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
@@ -23,6 +22,7 @@
#include "quota.h"
#include "uptodate.h"
#include "super.h"
+#include "ocfs2_trace.h"
/* Number of local quota structures per block */
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -475,7 +475,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
struct ocfs2_recovery_chunk *rchunk, *next;
qsize_t spacechange, inodechange;
- mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type);
+ trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type);
list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
chunk = rchunk->rc_chunk;
@@ -575,7 +575,8 @@ out_put_bh:
}
if (status < 0)
free_recovery_list(&(rec->r_list[type]));
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -600,7 +601,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
- mlog(0, "Recovering quota in slot %d\n", slot_num);
+ trace_ocfs2_finish_quota_recovery(slot_num);
lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
if (!lqinode) {
status = -ENOENT;
@@ -882,9 +883,10 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
od->dq_originodes);
spin_unlock(&dq_data_lock);
- mlog(0, "Writing local dquot %u space %lld inodes %lld\n",
- od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod),
- (long long)le64_to_cpu(dqblk->dqb_inodemod));
+ trace_olq_set_dquot(
+ (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod),
+ (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod),
+ od->dq_dquot.dq_id);
}
/* Write dquot to local quota file */
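A converted call site must pass its arguments in the order fixed by the event's TP_PROTO, which is why olq_set_dquot() above hands over the space and inode deltas before the dquot id, unlike the old mlog() format string. The corresponding declaration in ocfs2_trace.h presumably looks roughly like this (a sketch, not quoted from the patch):

        TRACE_EVENT(olq_set_dquot,
                TP_PROTO(unsigned long long spacemod,
                         unsigned long long inodemod, unsigned int id),
                TP_ARGS(spacemod, inodemod, id),
                TP_STRUCT__entry(
                        __field(unsigned long long, spacemod)
                        __field(unsigned long long, inodemod)
                        __field(unsigned int, id)
                ),
                TP_fast_assign(
                        __entry->spacemod = spacemod;
                        __entry->inodemod = inodemod;
                        __entry->id = id;
                ),
                TP_printk("%llu %llu %u",
                          __entry->spacemod, __entry->inodemod, __entry->id)
        );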
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index c4feced..3c7606c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -16,7 +16,6 @@
*/
#include <linux/sort.h>
-#define MLOG_MASK_PREFIX ML_REFCOUNT
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
@@ -34,6 +33,7 @@
#include "aops.h"
#include "xattr.h"
#include "namei.h"
+#include "ocfs2_trace.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -84,8 +84,7 @@ static int ocfs2_validate_refcount_block(struct super_block *sb,
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)bh->b_data;
- mlog(0, "Validating refcount block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -545,8 +544,8 @@ void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
while ((node = rb_last(root)) != NULL) {
tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
- mlog(0, "Purge tree %llu\n",
- (unsigned long long) tree->rf_blkno);
+ trace_ocfs2_purge_refcount_trees(
+ (unsigned long long) tree->rf_blkno);
rb_erase(&tree->rf_node, root);
ocfs2_free_refcount_tree(tree);
@@ -575,7 +574,8 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
- mlog(0, "create tree for inode %lu\n", inode->i_ino);
+ trace_ocfs2_create_refcount_tree(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
@@ -646,8 +646,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
di->i_refcount_loc = cpu_to_le64(first_blkno);
spin_unlock(&oi->ip_lock);
- mlog(0, "created tree for inode %lu, refblock %llu\n",
- inode->i_ino, (unsigned long long)first_blkno);
+ trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
ocfs2_journal_dirty(handle, di_bh);
@@ -1256,8 +1255,9 @@ static int ocfs2_change_refcount_rec(handle_t *handle,
goto out;
}
- mlog(0, "change index %d, old count %u, change %d\n", index,
- le32_to_cpu(rec->r_refcount), change);
+ trace_ocfs2_change_refcount_rec(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ index, le32_to_cpu(rec->r_refcount), change);
le32_add_cpu(&rec->r_refcount, change);
if (!rec->r_refcount) {
@@ -1353,8 +1353,8 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
ocfs2_journal_dirty(handle, ref_root_bh);
- mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
- le16_to_cpu(new_rb->rf_records.rl_used));
+ trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
+ le16_to_cpu(new_rb->rf_records.rl_used));
*ref_leaf_bh = new_bh;
new_bh = NULL;
@@ -1466,9 +1466,9 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
(struct ocfs2_refcount_block *)new_bh->b_data;
struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
- mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
- (unsigned long long)ref_leaf_bh->b_blocknr,
- le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
+ trace_ocfs2_divide_leaf_refcount_block(
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+ le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
/*
* XXX: Improvement later.
@@ -1601,8 +1601,8 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
- mlog(0, "insert new leaf block %llu at %u\n",
- (unsigned long long)new_bh->b_blocknr, new_cpos);
+ trace_ocfs2_new_leaf_refcount_block(
+ (unsigned long long)new_bh->b_blocknr, new_cpos);
/* Insert the new leaf block with the specific offset cpos. */
ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
@@ -1794,11 +1794,10 @@ static int ocfs2_insert_refcount_rec(handle_t *handle,
(le16_to_cpu(rf_list->rl_used) - index) *
sizeof(struct ocfs2_refcount_rec));
- mlog(0, "insert refcount record start %llu, len %u, count %u "
- "to leaf block %llu at index %d\n",
- (unsigned long long)le64_to_cpu(rec->r_cpos),
- le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
- (unsigned long long)ref_leaf_bh->b_blocknr, index);
+ trace_ocfs2_insert_refcount_rec(
+ (unsigned long long)ref_leaf_bh->b_blocknr, index,
+ (unsigned long long)le64_to_cpu(rec->r_cpos),
+ le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
rf_list->rl_recs[index] = *rec;
@@ -1850,10 +1849,12 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
- mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
- le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
- le64_to_cpu(split_rec->r_cpos),
- le32_to_cpu(split_rec->r_clusters));
+ trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
+ le32_to_cpu(orig_rec->r_clusters),
+ le32_to_cpu(orig_rec->r_refcount),
+ le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters),
+ le32_to_cpu(split_rec->r_refcount));
/*
* If we just need to split the header or tail clusters,
@@ -1967,12 +1968,11 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
if (split_rec->r_refcount) {
rf_list->rl_recs[index] = *split_rec;
- mlog(0, "insert refcount record start %llu, len %u, count %u "
- "to leaf block %llu at index %d\n",
- (unsigned long long)le64_to_cpu(split_rec->r_cpos),
- le32_to_cpu(split_rec->r_clusters),
- le32_to_cpu(split_rec->r_refcount),
- (unsigned long long)ref_leaf_bh->b_blocknr, index);
+ trace_ocfs2_split_refcount_rec_insert(
+ (unsigned long long)ref_leaf_bh->b_blocknr, index,
+ (unsigned long long)le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters),
+ le32_to_cpu(split_rec->r_refcount));
if (merge)
ocfs2_refcount_rec_merge(rb, index);
@@ -1997,7 +1997,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
struct ocfs2_refcount_rec rec;
unsigned int set_len = 0;
- mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
+ trace_ocfs2_increase_refcount_begin(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)cpos, len);
@@ -2024,9 +2024,9 @@ static int __ocfs2_increase_refcount(handle_t *handle,
*/
if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
set_len <= len) {
- mlog(0, "increase refcount rec, start %llu, len %u, "
- "count %u\n", (unsigned long long)cpos, set_len,
- le32_to_cpu(rec.r_refcount));
+ trace_ocfs2_increase_refcount_change(
+ (unsigned long long)cpos, set_len,
+ le32_to_cpu(rec.r_refcount));
ret = ocfs2_change_refcount_rec(handle, ci,
ref_leaf_bh, index,
merge, 1);
@@ -2037,7 +2037,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
} else if (!rec.r_refcount) {
rec.r_refcount = cpu_to_le32(1);
- mlog(0, "insert refcount rec, start %llu, len %u\n",
+ trace_ocfs2_increase_refcount_insert(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len);
ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
@@ -2055,8 +2055,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
rec.r_clusters = cpu_to_le32(set_len);
le32_add_cpu(&rec.r_refcount, 1);
- mlog(0, "split refcount rec, start %llu, "
- "len %u, count %u\n",
+ trace_ocfs2_increase_refcount_split(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len, le32_to_cpu(rec.r_refcount));
ret = ocfs2_split_refcount_rec(handle, ci,
@@ -2095,6 +2094,11 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
BUG_ON(rb->rf_records.rl_used);
+ trace_ocfs2_remove_refcount_extent(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+ le32_to_cpu(rb->rf_cpos));
+
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
1, meta_ac, dealloc);
@@ -2137,7 +2141,7 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
if (!rb->rf_list.l_next_free_rec) {
BUG_ON(rb->rf_clusters);
- mlog(0, "reset refcount tree root %llu to be a record block.\n",
+ trace_ocfs2_restore_refcount_block(
(unsigned long long)ref_root_bh->b_blocknr);
rb->rf_flags = 0;
@@ -2184,6 +2188,10 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
BUG_ON(cpos + len >
le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
+ trace_ocfs2_decrease_refcount_rec(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len);
+
if (cpos == le64_to_cpu(rec->r_cpos) &&
len == le32_to_cpu(rec->r_clusters))
ret = ocfs2_change_refcount_rec(handle, ci,
@@ -2195,12 +2203,6 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
le32_add_cpu(&split.r_refcount, -1);
- mlog(0, "split refcount rec, start %llu, "
- "len %u, count %u, original start %llu, len %u\n",
- (unsigned long long)le64_to_cpu(split.r_cpos),
- len, le32_to_cpu(split.r_refcount),
- (unsigned long long)le64_to_cpu(rec->r_cpos),
- le32_to_cpu(rec->r_clusters));
ret = ocfs2_split_refcount_rec(handle, ci,
ref_root_bh, ref_leaf_bh,
&split, index, 1,
@@ -2239,10 +2241,9 @@ static int __ocfs2_decrease_refcount(handle_t *handle,
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct buffer_head *ref_leaf_bh = NULL;
- mlog(0, "Tree owner %llu, decrease refcount start %llu, "
- "len %u, delete %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)cpos, len, delete);
+ trace_ocfs2_decrease_refcount(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len, delete);
while (len) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
@@ -2352,8 +2353,8 @@ static int ocfs2_mark_extent_refcounted(struct inode *inode,
{
int ret;
- mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
- inode->i_ino, cpos, len, phys);
+ trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
+ cpos, len, phys);
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
@@ -2392,8 +2393,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
u32 len;
- mlog(0, "start_cpos %llu, clusters %u\n",
- (unsigned long long)start_cpos, clusters);
while (clusters) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, clusters, &rec,
@@ -2427,12 +2426,11 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
- mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
- "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
- recs_add, (unsigned long long)cpos, clusters,
- (unsigned long long)le64_to_cpu(rec.r_cpos),
- le32_to_cpu(rec.r_clusters),
- le32_to_cpu(rec.r_refcount), index);
+ trace_ocfs2_calc_refcount_meta_credits_iterate(
+ recs_add, (unsigned long long)cpos, clusters,
+ (unsigned long long)le64_to_cpu(rec.r_cpos),
+ le32_to_cpu(rec.r_clusters),
+ le32_to_cpu(rec.r_refcount), index);
len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
@@ -2488,7 +2486,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
if (!ref_blocks)
goto out;
- mlog(0, "we need ref_blocks %d\n", ref_blocks);
*meta_add += ref_blocks;
*credits += ref_blocks;
@@ -2514,6 +2511,10 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
}
out:
+
+ trace_ocfs2_calc_refcount_meta_credits(
+ (unsigned long long)start_cpos, clusters,
+ *meta_add, *credits);
brelse(ref_leaf_bh);
brelse(prev_bh);
return ret;
@@ -2578,8 +2579,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
goto out;
}
- mlog(0, "reserve new metadata %d blocks, credits = %d\n",
- *ref_blocks, *credits);
+ trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
out:
brelse(ref_root_bh);
@@ -2886,8 +2886,7 @@ static int ocfs2_lock_refcount_allocators(struct super_block *sb,
goto out;
}
- mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
- meta_add, num_clusters, *credits);
+ trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
meta_ac);
if (ret) {
@@ -2937,8 +2936,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
loff_t offset, end, map_end;
struct address_space *mapping = context->inode->i_mapping;
- mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
- new_cluster, new_len, cpos);
+ trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
+ new_cluster, new_len);
readahead_pages =
(ocfs2_cow_contig_clusters(sb) <<
@@ -3031,8 +3030,8 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
struct buffer_head *old_bh = NULL;
struct buffer_head *new_bh = NULL;
- mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
- new_cluster, new_len);
+ trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
+ new_cluster, new_len);
for (i = 0; i < blocks; i++, old_block++, new_block++) {
new_bh = sb_getblk(osb->sb, new_block);
@@ -3085,8 +3084,8 @@ static int ocfs2_clear_ext_refcount(handle_t *handle,
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
- mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
- (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
+ trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
+ cpos, len, p_cluster, ext_flags);
memset(&replace_rec, 0, sizeof(replace_rec));
replace_rec.e_cpos = cpu_to_le32(cpos);
@@ -3141,8 +3140,8 @@ static int ocfs2_replace_clusters(handle_t *handle,
struct ocfs2_caching_info *ci = context->data_et.et_ci;
u64 ino = ocfs2_metadata_cache_owner(ci);
- mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
- (unsigned long long)ino, cpos, old, new, len, ext_flags);
+ trace_ocfs2_replace_clusters((unsigned long long)ino,
+ cpos, old, new, len, ext_flags);
 /* If the old clusters are unwritten, no need to duplicate. */
if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
@@ -3236,8 +3235,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
struct ocfs2_refcount_rec rec;
- mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
- cpos, p_cluster, num_clusters, e_flags);
+ trace_ocfs2_make_clusters_writable(cpos, p_cluster,
+ num_clusters, e_flags);
ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
&context->data_et,
@@ -3475,9 +3474,9 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
goto out;
}
- mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
- "cow_len %u\n", inode->i_ino,
- cpos, write_len, cow_start, cow_len);
+ trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
+ cpos, write_len, max_cpos,
+ cow_start, cow_len);
BUG_ON(cow_len == 0);
@@ -3756,8 +3755,7 @@ int ocfs2_add_refcount_flag(struct inode *inode,
goto out;
}
- mlog(0, "reserve new metadata %d, credits = %d\n",
- ref_blocks, credits);
+ trace_ocfs2_add_refcount_flag(ref_blocks, credits);
if (ref_blocks) {
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
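A note on why even chatty mlog(0, ...) statements like the ones removed above are worth converting: an mlog() call always performs at least a runtime mask test, whereas a disabled trace_*() call reduces to a branch over a no-op (a patched jump label on architectures that support it), and the argument computations are typically optimized away along with it. When the events are wanted, they can be switched on at runtime under /sys/kernel/debug/tracing/events/ocfs2/ and read back through trace_pipe, with no rebuild or remount.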
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 3e78db3..41ffd36 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -30,10 +30,10 @@
#include <linux/bitops.h>
#include <linux/list.h>
-#define MLOG_MASK_PREFIX ML_RESERVATIONS
#include <cluster/masklog.h>
#include "ocfs2.h"
+#include "ocfs2_trace.h"
#ifdef CONFIG_OCFS2_DEBUG_FS
#define OCFS2_CHECK_RESERVATIONS
@@ -321,8 +321,7 @@ static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
assert_spin_locked(&resv_lock);
- mlog(0, "Insert reservation start: %u len: %u\n", new->r_start,
- new->r_len);
+ trace_ocfs2_resv_insert(new->r_start, new->r_len);
while (*p) {
parent = *p;
@@ -423,8 +422,8 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
unsigned int best_start, best_len = 0;
int offset, start, found;
- mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n",
- wanted, search_start, search_len, resmap->m_bitmap_len);
+ trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
+ wanted, resmap->m_bitmap_len);
found = best_start = best_len = 0;
@@ -463,7 +462,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
*rlen = best_len;
*rstart = best_start;
- mlog(0, "Found start: %u len: %u\n", best_start, best_len);
+ trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);
return *rlen;
}
@@ -487,9 +486,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
* - our window should be last in all reservations
* - need to make sure we don't go past end of bitmap
*/
-
- mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n",
- resv->r_start, ocfs2_resv_end(resv), goal, wanted);
+ trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
+ goal, wanted, RB_EMPTY_ROOT(root));
assert_spin_locked(&resv_lock);
@@ -498,9 +496,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
* Easiest case - empty tree. We can just take
* whatever window of free bits we want.
*/
-
- mlog(0, "Empty root\n");
-
clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
resmap->m_bitmap_len - goal,
&cstart, &clen);
@@ -524,8 +519,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
prev_resv = ocfs2_find_resv_lhs(resmap, goal);
if (prev_resv == NULL) {
- mlog(0, "Goal on LHS of leftmost window\n");
-
/*
* A NULL here means that the search code couldn't
* find a window that starts before goal.
@@ -570,13 +563,15 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
next_resv = NULL;
}
+ trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
+ ocfs2_resv_end(prev_resv));
+
prev = &prev_resv->r_node;
/* Now we do a linear search for a window, starting at 'prev_rsv' */
while (1) {
next = rb_next(prev);
if (next) {
- mlog(0, "One more resv found in linear search\n");
next_resv = rb_entry(next,
struct ocfs2_alloc_reservation,
r_node);
@@ -585,7 +580,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
gap_end = next_resv->r_start - 1;
gap_len = gap_end - gap_start + 1;
} else {
- mlog(0, "No next node\n");
/*
* We're at the rightmost edge of the
* tree. See if a reservation between this
@@ -596,6 +590,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
gap_end = resmap->m_bitmap_len - 1;
}
+ trace_ocfs2_resv_find_window_next(next ? next_resv->r_start : -1,
+ next ? ocfs2_resv_end(next_resv) : -1);
/*
* No need to check this gap if we have already found
* a larger region of free bits.
@@ -654,8 +650,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
lru_resv = list_first_entry(&resmap->m_lru,
struct ocfs2_alloc_reservation, r_lru);
- mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start,
- lru_resv->r_len, ocfs2_resv_end(lru_resv));
+ trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
+ lru_resv->r_len,
+ ocfs2_resv_end(lru_resv));
/*
* Cannibalize (some or all) of the target reservation and
@@ -684,10 +681,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
resv->r_len = shrink;
}
- mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
- "r_len: %u r_last_start: %u r_last_len: %u\n",
- resv->r_start, ocfs2_resv_end(resv), resv->r_len,
- resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start,
+ resv->r_last_len);
ocfs2_resv_insert(resmap, resv);
}
@@ -748,7 +744,6 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
wanted = *clen;
- mlog(0, "empty reservation, find new window\n");
/*
* Try to get a window here. If it works, we must fall
 * through and test the bitmap. This avoids some
@@ -757,6 +752,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
* that inode.
*/
ocfs2_resv_find_window(resmap, resv, wanted);
+ trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
}
BUG_ON(ocfs2_resv_empty(resv));
@@ -813,10 +809,10 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
spin_lock(&resv_lock);
- mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u "
- "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n",
- cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv),
- resv->r_len, resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start,
+ resv->r_last_len);
BUG_ON(cstart < resv->r_start);
BUG_ON(cstart > ocfs2_resv_end(resv));
@@ -833,10 +829,9 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
if (!ocfs2_resv_empty(resv))
ocfs2_resv_mark_lru(resmap, resv);
- mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
- "r_len: %u r_last_start: %u r_last_len: %u\n",
- resv->r_start, ocfs2_resv_end(resv), resv->r_len,
- resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start,
+ resv->r_last_len);
ocfs2_check_resmap(resmap);
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
index 1e49cc2..42c2b80 100644
--- a/fs/ocfs2/reservations.h
+++ b/fs/ocfs2/reservations.h
@@ -29,7 +29,7 @@
struct ocfs2_alloc_reservation {
struct rb_node r_node;
- unsigned int r_start; /* Begining of current window */
+ unsigned int r_start; /* Beginning of current window */
unsigned int r_len; /* Length of the window */
unsigned int r_last_len; /* Length of most recent alloc */
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index dacd553..ec55add 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -27,7 +27,6 @@
#include <linux/fs.h>
#include <linux/types.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#include "suballoc.h"
@@ -82,7 +82,6 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
backups++;
}
- mlog_exit_void();
return backups;
}
@@ -103,8 +102,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
- mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n",
- new_clusters, first_new_cluster);
+ trace_ocfs2_update_last_group_and_inode(new_clusters,
+ first_new_cluster);
ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -176,7 +175,8 @@ out_rollback:
le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
}
out:
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -281,8 +281,6 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
u32 first_new_cluster;
u64 lgd_blkno;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
@@ -342,7 +340,8 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
goto out_unlock;
}
- mlog(0, "extend the last group at %llu, new clusters = %d\n",
+
+ trace_ocfs2_group_extend(
(unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
@@ -377,7 +376,6 @@ out_mutex:
iput(main_bm_inode);
out:
- mlog_exit_void();
return ret;
}
@@ -472,8 +470,6 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
struct ocfs2_chain_rec *cr;
u16 cl_bpc;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
@@ -520,8 +516,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
goto out_unlock;
}
- mlog(0, "Add a new group %llu in chain = %u, length = %u\n",
- (unsigned long long)input->group, input->chain, input->clusters);
+ trace_ocfs2_group_add((unsigned long long)input->group,
+ input->chain, input->clusters, input->frees);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
if (IS_ERR(handle)) {
@@ -589,6 +585,5 @@ out_mutex:
iput(main_bm_inode);
out:
- mlog_exit_void();
return ret;
}
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index ab4e017..26fc001 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_SUPER
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -142,8 +142,7 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
BUG_ON(si->si_blocks == 0);
BUG_ON(si->si_bh == NULL);
- mlog(0, "Refreshing slot map, reading %u block(s)\n",
- si->si_blocks);
+ trace_ocfs2_refresh_slot_info(si->si_blocks);
/*
* We pass -1 as blocknr because we expect all of si->si_bh to
@@ -381,8 +380,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
/* The size checks above should ensure this */
BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);
- mlog(0, "Slot map needs %u buffers for %llu bytes\n",
- si->si_blocks, bytes);
+ trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
GFP_KERNEL);
@@ -400,8 +398,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
goto bail;
}
- mlog(0, "Reading slot map block %u at %llu\n", i,
- (unsigned long long)blkno);
+ trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i);
bh = NULL; /* Acquire a fresh bh */
status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
@@ -475,8 +472,6 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
int slot;
struct ocfs2_slot_info *si;
- mlog_entry_void();
-
si = osb->slot_info;
spin_lock(&osb->osb_lock);
@@ -505,14 +500,13 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
osb->slot_num = slot;
spin_unlock(&osb->osb_lock);
- mlog(0, "taking node slot %d\n", osb->slot_num);
+ trace_ocfs2_find_slot(osb->slot_num);
status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
if (status < 0)
mlog_errno(status);
bail:
- mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 8ce7398..1ec56fd 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -126,7 +126,7 @@ struct ocfs2_stack_operations {
*
* ->connect() must not return until it is guaranteed that
*
- * - Node down notifications for the filesystem will be recieved
+ * - Node down notifications for the filesystem will be received
* and passed to conn->cc_recovery_handler().
* - Locking requests for the filesystem will be processed.
*/
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 71998d4..ba5d97e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -44,6 +43,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -308,8 +308,8 @@ static int ocfs2_validate_group_descriptor(struct super_block *sb,
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
- mlog(0, "Validating group descriptor %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_group_descriptor(
+ (unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -389,8 +389,6 @@ static int ocfs2_block_group_fill(handle_t *handle,
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct super_block * sb = alloc_inode->i_sb;
- mlog_entry_void();
-
if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
"b_blocknr (%llu)",
@@ -436,7 +434,8 @@ static int ocfs2_block_group_fill(handle_t *handle,
* allocation time. */
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -477,8 +476,8 @@ ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "new descriptor, record %u, at block %llu\n",
- alloc_rec, (unsigned long long)bg_blkno);
+ trace_ocfs2_block_group_alloc_contig(
+ (unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
@@ -657,8 +656,8 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "new descriptor, record %u, at block %llu\n",
- alloc_rec, (unsigned long long)bg_blkno);
+ trace_ocfs2_block_group_alloc_discontig(
+ (unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
@@ -707,8 +706,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
- mlog_entry_void();
-
cl = &fe->id2.i_chain;
status = ocfs2_reserve_clusters_with_limit(osb,
le16_to_cpu(cl->cl_cpg),
@@ -730,8 +727,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
}
if (last_alloc_group && *last_alloc_group != 0) {
- mlog(0, "use old allocation group %llu for block group alloc\n",
- (unsigned long long)*last_alloc_group);
+ trace_ocfs2_block_group_alloc(
+ (unsigned long long)*last_alloc_group);
ac->ac_last_group = *last_alloc_group;
}
@@ -796,7 +793,8 @@ bail:
brelse(bg_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -814,8 +812,6 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *fe;
u32 free_bits;
- mlog_entry_void();
-
alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
if (!alloc_inode) {
mlog_errno(-EINVAL);
@@ -855,16 +851,15 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
if (bits_wanted > free_bits) {
/* cluster bitmap never grows */
if (ocfs2_is_cluster_bitmap(alloc_inode)) {
- mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
- bits_wanted, free_bits);
+ trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
+ free_bits);
status = -ENOSPC;
goto bail;
}
if (!(flags & ALLOC_NEW_GROUP)) {
- mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
- "and we don't alloc a new group for it.\n",
- slot, bits_wanted, free_bits);
+ trace_ocfs2_reserve_suballoc_bits_no_new_group(
+ slot, bits_wanted, free_bits);
status = -ENOSPC;
goto bail;
}
@@ -890,7 +885,8 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
bail:
brelse(bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1052,7 +1048,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1119,8 +1116,8 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
spin_lock(&osb->osb_lock);
osb->osb_inode_alloc_group = alloc_group;
spin_unlock(&osb->osb_lock);
- mlog(0, "after reservation, new allocation group is "
- "%llu\n", (unsigned long long)alloc_group);
+ trace_ocfs2_reserve_new_inode_new_group(
+ (unsigned long long)alloc_group);
/*
* Some inodes must be freed by us, so try to allocate
@@ -1152,7 +1149,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1189,8 +1187,6 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
{
int status;
- mlog_entry_void();
-
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
@@ -1229,7 +1225,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1357,15 +1354,12 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
void *bitmap = bg->bg_bitmap;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
- mlog_entry_void();
-
/* All callers get the descriptor via
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
- mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
- num_bits);
+ trace_ocfs2_block_group_set_bits(bit_off, num_bits);
if (ocfs2_is_cluster_bitmap(alloc_inode))
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
@@ -1394,7 +1388,8 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
ocfs2_journal_dirty(handle, group_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1437,10 +1432,10 @@ static int ocfs2_relink_block_group(handle_t *handle,
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
- mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
- (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
- (unsigned long long)le64_to_cpu(bg->bg_blkno),
- (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
+ trace_ocfs2_relink_block_group(
+ (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
+ (unsigned long long)le64_to_cpu(bg->bg_blkno),
+ (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
bg_ptr = le64_to_cpu(bg->bg_next_group);
@@ -1484,7 +1479,8 @@ out_rollback:
prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1515,7 +1511,7 @@ static int ocfs2_cluster_group_search(struct inode *inode,
max_bits = le16_to_cpu(gd->bg_bits);
/* Tail groups in cluster bitmaps which aren't cpg
- * aligned are prone to partial extention by a failed
+ * aligned are prone to partial extension by a failed
* fs resize. If the file system resize never got to
* update the dinode cluster count, then we don't want
* to trust any clusters past it, regardless of what
@@ -1525,10 +1521,10 @@ static int ocfs2_cluster_group_search(struct inode *inode,
if ((gd_cluster_off + max_bits) >
OCFS2_I(inode)->ip_clusters) {
max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
- mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
- (unsigned long long)le64_to_cpu(gd->bg_blkno),
- le16_to_cpu(gd->bg_bits),
- OCFS2_I(inode)->ip_clusters, max_bits);
+ trace_ocfs2_cluster_group_search_wrong_max_bits(
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits),
+ OCFS2_I(inode)->ip_clusters, max_bits);
}
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
@@ -1542,9 +1538,9 @@ static int ocfs2_cluster_group_search(struct inode *inode,
gd_cluster_off +
res->sr_bit_offset +
res->sr_bits);
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)blkoff,
- (unsigned long long)max_block);
+ trace_ocfs2_cluster_group_search_max_block(
+ (unsigned long long)blkoff,
+ (unsigned long long)max_block);
if (blkoff > max_block)
return -ENOSPC;
}
@@ -1588,9 +1584,9 @@ static int ocfs2_block_group_search(struct inode *inode,
if (!ret && max_block) {
blkoff = le64_to_cpu(bg->bg_blkno) +
res->sr_bit_offset + res->sr_bits;
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)blkoff,
- (unsigned long long)max_block);
+ trace_ocfs2_block_group_search_max_block(
+ (unsigned long long)blkoff,
+ (unsigned long long)max_block);
if (blkoff > max_block)
ret = -ENOSPC;
}
@@ -1756,9 +1752,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
struct ocfs2_group_desc *bg;
chain = ac->ac_chain;
- mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
- bits_wanted, chain,
- (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);
+ trace_ocfs2_search_chain_begin(
+ (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
+ bits_wanted, chain);
status = ocfs2_read_group_descriptor(alloc_inode, fe,
le64_to_cpu(cl->cl_recs[chain].c_blkno),
@@ -1799,8 +1795,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
- res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
+ trace_ocfs2_search_chain_succ(
+ (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
@@ -1861,8 +1857,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
- (unsigned long long)le64_to_cpu(fe->i_blkno));
+ trace_ocfs2_search_chain_end(
+ (unsigned long long)le64_to_cpu(fe->i_blkno),
+ res->sr_bits);
out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
@@ -1870,7 +1867,8 @@ bail:
brelse(group_bh);
brelse(prev_group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1888,8 +1886,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
BUG_ON(!ac->ac_bh);
@@ -1945,8 +1941,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "Search of victim chain %u came up with nothing, "
- "trying all chains now.\n", victim);
+ trace_ocfs2_claim_suballoc_bits(victim);
/* If we didn't pick a good victim, then just default to
* searching each chain in order. Don't allow chain relinking
@@ -1984,7 +1979,8 @@ set_hint:
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2021,7 +2017,8 @@ int ocfs2_claim_metadata(handle_t *handle,
*num_bits = res.sr_bits;
status = 0;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2172,8 +2169,8 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
goto out;
}
- mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
- (unsigned long long)di_blkno);
+ trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+ res->sr_bits);
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
@@ -2201,8 +2198,6 @@ int ocfs2_claim_new_inode(handle_t *handle,
int status;
struct ocfs2_suballoc_result res;
- mlog_entry_void();
-
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
@@ -2230,7 +2225,8 @@ int ocfs2_claim_new_inode(handle_t *handle,
ocfs2_save_inode_ac_group(dir, ac);
status = 0;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2307,8 +2303,6 @@ int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
- mlog_entry_void();
-
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
@@ -2363,7 +2357,8 @@ int __ocfs2_claim_clusters(handle_t *handle,
ac->ac_bits_given += *num_clusters;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2392,13 +2387,11 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
- mlog_entry_void();
-
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
- mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
+ trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
@@ -2463,19 +2456,18 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group;
- mlog_entry_void();
-
/* The alloc_bh comes from ocfs2_free_dinode() or
* ocfs2_free_clusters(). The callers have all locked the
* allocator and gotten alloc_bh from the lock call. This
- * validates the dinode buffer. Any corruption that has happended
+ * validates the dinode buffer. Any corruption that has happened
* is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
- mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
- (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
- (unsigned long long)bg_blkno, start_bit);
+ trace_ocfs2_free_suballoc_bits(
+ (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
+ (unsigned long long)bg_blkno,
+ start_bit, count);
status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
&group_bh);
@@ -2511,7 +2503,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
bail:
brelse(group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2556,11 +2549,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
/* You can't ever have a contiguous set of clusters
* bigger than a block group bitmap so we never have to worry
- * about looping on them. */
-
- mlog_entry_void();
-
- /* This is expensive. We can safely remove once this stuff has
+ * about looping on them.
+ * This is expensive. We can safely remove once this stuff has
* gotten tested really well. */
BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
@@ -2569,10 +2559,9 @@ static int _ocfs2_free_clusters(handle_t *handle,
ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
&bg_start_bit);
- mlog(0, "want to free %u clusters starting at block %llu\n",
- num_clusters, (unsigned long long)start_blk);
- mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
- (unsigned long long)bg_blkno, bg_start_bit);
+ trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
+ (unsigned long long)start_blk,
+ bg_start_bit, num_clusters);
status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
bg_start_bit, bg_blkno,
@@ -2586,7 +2575,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
num_clusters);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2756,7 +2746,7 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
struct buffer_head *inode_bh = NULL;
struct ocfs2_dinode *inode_fe;
- mlog_entry("blkno: %llu\n", (unsigned long long)blkno);
+ trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
/* dirty read disk */
status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
@@ -2793,7 +2783,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
bail:
brelse(inode_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2816,8 +2807,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
u64 bg_blkno;
int status;
- mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno,
- (unsigned int)bit);
+ trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
+ (unsigned int)bit);
alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
@@ -2844,7 +2835,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
bail:
brelse(group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2869,7 +2861,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
- mlog_entry("blkno: %llu", (unsigned long long)blkno);
+ trace_ocfs2_test_inode_bit((unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
&group_blkno, &suballoc_bit);
@@ -2910,6 +2902,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
iput(inode_alloc_inode);
brelse(alloc_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 236ed1b..5a521c7 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -42,7 +42,9 @@
#include <linux/seq_file.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_SUPER
+#define CREATE_TRACE_POINTS
+#include "ocfs2_trace.h"
+
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -76,7 +78,7 @@ static struct kmem_cache *ocfs2_inode_cachep = NULL;
struct kmem_cache *ocfs2_dquot_cachep;
struct kmem_cache *ocfs2_qf_chunk_cachep;
-/* OCFS2 needs to schedule several differnt types of work which
+/* OCFS2 needs to schedule several different types of work which
* require cluster locking, disk I/O, recovery waits, etc. Since these
* types of work tend to be heavy we avoid using the kernel events
* workqueue and schedule on our own. */
@@ -441,8 +443,6 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
int status = 0;
int i;
- mlog_entry_void();
-
new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0);
if (IS_ERR(new)) {
status = PTR_ERR(new);
@@ -478,7 +478,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -488,8 +489,6 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
int status = 0;
int i;
- mlog_entry_void();
-
for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1;
i < NUM_SYSTEM_INODES;
i++) {
@@ -508,7 +507,8 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -517,8 +517,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
int i;
struct inode *inode;
- mlog_entry_void();
-
for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) {
inode = osb->global_system_inodes[i];
if (inode) {
@@ -540,7 +538,7 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
}
if (!osb->local_system_inodes)
- goto out;
+ return;
for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) {
if (osb->local_system_inodes[i]) {
@@ -551,9 +549,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
kfree(osb->local_system_inodes);
osb->local_system_inodes = NULL;
-
-out:
- mlog_exit(0);
}
/* We're allocating fs objects, use GFP_NOFS */
@@ -684,12 +679,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
}
if (*flags & MS_RDONLY) {
- mlog(0, "Going to ro mode.\n");
sb->s_flags |= MS_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
- mlog(0, "Making ro filesystem writeable.\n");
-
if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
mlog(ML_ERROR, "Cannot remount RDWR "
"filesystem due to previous errors.\n");
@@ -707,6 +699,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
+ trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
@@ -1032,7 +1025,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
char nodestr[8];
struct ocfs2_blockcheck_stats stats;
- mlog_entry("%p, %p, %i", sb, data, silent);
+ trace_ocfs2_fill_super(sb, data, silent);
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
@@ -1208,7 +1201,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
mlog_errno(status);
atomic_set(&osb->vol_state, VOLUME_DISABLED);
wake_up(&osb->osb_mount_event);
- mlog_exit(status);
return status;
}
}
@@ -1222,7 +1214,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
/* Start this when the mount is almost sure of being successful */
ocfs2_orphan_scan_start(osb);
- mlog_exit(status);
return status;
read_super_error:
@@ -1237,7 +1228,8 @@ read_super_error:
ocfs2_dismount_volume(sb, 1);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1320,8 +1312,7 @@ static int ocfs2_parse_options(struct super_block *sb,
char *p;
u32 tmp;
- mlog_entry("remount: %d, options: \"%s\"\n", is_remount,
- options ? options : "(none)");
+ trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
mopt->commit_interval = 0;
mopt->mount_opt = OCFS2_MOUNT_NOINTR;
@@ -1538,7 +1529,6 @@ static int ocfs2_parse_options(struct super_block *sb,
status = 1;
bail:
- mlog_exit(status);
return status;
}
@@ -1629,8 +1619,6 @@ static int __init ocfs2_init(void)
{
int status;
- mlog_entry_void();
-
ocfs2_print_version();
status = init_ocfs2_uptodate_cache();
@@ -1664,10 +1652,9 @@ leave:
if (status < 0) {
ocfs2_free_mem_caches();
exit_ocfs2_uptodate_cache();
+ mlog_errno(status);
}
- mlog_exit(status);
-
if (status >= 0) {
return register_filesystem(&ocfs2_fs_type);
} else
@@ -1676,8 +1663,6 @@ leave:
static void __exit ocfs2_exit(void)
{
- mlog_entry_void();
-
if (ocfs2_wq) {
flush_workqueue(ocfs2_wq);
destroy_workqueue(ocfs2_wq);
@@ -1692,18 +1677,14 @@ static void __exit ocfs2_exit(void)
unregister_filesystem(&ocfs2_fs_type);
exit_ocfs2_uptodate_cache();
-
- mlog_exit_void();
}
static void ocfs2_put_super(struct super_block *sb)
{
- mlog_entry("(0x%p)\n", sb);
+ trace_ocfs2_put_super(sb);
ocfs2_sync_blockdev(sb);
ocfs2_dismount_volume(sb, 0);
-
- mlog_exit_void();
}
static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -1715,7 +1696,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
struct buffer_head *bh = NULL;
struct inode *inode = NULL;
- mlog_entry("(%p, %p)\n", dentry->d_sb, buf);
+ trace_ocfs2_statfs(dentry->d_sb, buf);
osb = OCFS2_SB(dentry->d_sb);
@@ -1762,7 +1743,8 @@ bail:
if (inode)
iput(inode);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1882,8 +1864,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
int unlock_super = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb))
goto leave;
@@ -1928,7 +1908,6 @@ leave:
if (unlock_super)
ocfs2_super_unlock(osb, 1);
- mlog_exit(status);
return status;
}
@@ -1938,7 +1917,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
struct ocfs2_super *osb = NULL;
char nodestr[8];
- mlog_entry("(0x%p)\n", sb);
+ trace_ocfs2_dismount_volume(sb);
BUG_ON(!sb);
osb = OCFS2_SB(sb);
@@ -2090,8 +2069,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
struct ocfs2_super *osb;
u64 total_blocks;
- mlog_entry_void();
-
osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
if (!osb) {
status = -ENOMEM;
@@ -2155,7 +2132,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL;
goto bail;
}
- mlog(0, "max_slots for this device: %u\n", osb->max_slots);
ocfs2_orphan_scan_init(osb);
@@ -2294,7 +2270,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->s_clustersize_bits =
le32_to_cpu(di->id2.i_super.s_clustersize_bits);
osb->s_clustersize = 1 << osb->s_clustersize_bits;
- mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits);
if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE ||
osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) {
@@ -2333,11 +2308,10 @@ static int ocfs2_initialize_super(struct super_block *sb,
le64_to_cpu(di->id2.i_super.s_first_cluster_group);
osb->fs_generation = le32_to_cpu(di->i_fs_generation);
osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash);
- mlog(0, "vol_label: %s\n", osb->vol_label);
- mlog(0, "uuid: %s\n", osb->uuid_str);
- mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n",
- (unsigned long long)osb->root_blkno,
- (unsigned long long)osb->system_dir_blkno);
+ trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str,
+ (unsigned long long)osb->root_blkno,
+ (unsigned long long)osb->system_dir_blkno,
+ osb->s_clustersize_bits);
osb->osb_dlm_debug = ocfs2_new_dlm_debug();
if (!osb->osb_dlm_debug) {
@@ -2380,7 +2354,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
bail:
- mlog_exit(status);
return status;
}
@@ -2396,8 +2369,6 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
{
int status = -EAGAIN;
- mlog_entry_void();
-
if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
/* We have to do a raw check of the feature here */
@@ -2452,7 +2423,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
}
out:
- mlog_exit(status);
+ if (status && status != -EAGAIN)
+ mlog_errno(status);
return status;
}
@@ -2465,8 +2437,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
* recover
* ourselves. */
- mlog_entry_void();
-
/* Init our journal object. */
status = ocfs2_journal_init(osb->journal, &dirty);
if (status < 0) {
@@ -2516,8 +2486,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
* ourselves as mounted. */
}
- mlog(0, "Journal loaded.\n");
-
status = ocfs2_load_local_alloc(osb);
if (status < 0) {
mlog_errno(status);
@@ -2549,7 +2517,8 @@ finally:
if (local_alloc)
kfree(local_alloc);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2561,8 +2530,6 @@ finally:
*/
static void ocfs2_delete_osb(struct ocfs2_super *osb)
{
- mlog_entry_void();
-
/* This function assumes that the caller has the main osb resource */
ocfs2_free_slot_info(osb);
@@ -2580,8 +2547,6 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
kfree(osb->uuid_str);
ocfs2_put_dlm_debug(osb->osb_dlm_debug);
memset(osb, 0, sizeof(struct ocfs2_super));
-
- mlog_exit_void();
}
/* Put OCFS2 into a readonly state, or (if the user specifies it),
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 9975457..5d22872 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -40,7 +40,6 @@
#include <linux/pagemap.h>
#include <linux/namei.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -62,8 +61,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
char *link = NULL;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
status = ocfs2_read_inode_block(inode, bh);
if (status < 0) {
mlog_errno(status);
@@ -74,7 +71,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
fe = (struct ocfs2_dinode *) (*bh)->b_data;
link = (char *) fe->id2.i_symlink;
bail:
- mlog_exit(status);
return link;
}
@@ -88,8 +84,6 @@ static int ocfs2_readlink(struct dentry *dentry,
struct buffer_head *bh = NULL;
struct inode *inode = dentry->d_inode;
- mlog_entry_void();
-
link = ocfs2_fast_symlink_getlink(inode, &bh);
if (IS_ERR(link)) {
ret = PTR_ERR(link);
@@ -104,7 +98,8 @@ static int ocfs2_readlink(struct dentry *dentry,
brelse(bh);
out:
- mlog_exit(ret);
+ if (ret < 0)
+ mlog_errno(ret);
return ret;
}
@@ -117,8 +112,6 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
- mlog_entry_void();
-
BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
target = ocfs2_fast_symlink_getlink(inode, &bh);
if (IS_ERR(target)) {
@@ -142,7 +135,8 @@ bail:
nd_set_link(nd, status ? ERR_PTR(status) : link);
brelse(bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return NULL;
}
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 902efb2..3d635f4 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index a0a120e..52eaf33 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -54,14 +54,13 @@
#include <linux/buffer_head.h>
#include <linux/rbtree.h>
-#define MLOG_MASK_PREFIX ML_UPTODATE
-
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
struct ocfs2_meta_cache_item {
struct rb_node c_node;
@@ -152,8 +151,8 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
while ((node = rb_last(root)) != NULL) {
item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
- mlog(0, "Purge item %llu\n",
- (unsigned long long) item->c_block);
+ trace_ocfs2_purge_copied_metadata_tree(
+ (unsigned long long) item->c_block);
rb_erase(&item->c_node, root);
kmem_cache_free(ocfs2_uptodate_cachep, item);
@@ -180,9 +179,9 @@ void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
to_purge = ci->ci_num_cached;
- mlog(0, "Purge %u %s items from Owner %llu\n", to_purge,
- tree ? "array" : "tree",
- (unsigned long long)ocfs2_metadata_cache_owner(ci));
+ trace_ocfs2_metadata_cache_purge(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ to_purge, tree);
/* If we're a tree, save off the root so that we can safely
* initialize the cache. We do the work to free tree members
@@ -249,10 +248,10 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_lock(ci);
- mlog(0, "Owner %llu, query block %llu (inline = %u)\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long) bh->b_blocknr,
- !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
+ trace_ocfs2_buffer_cached_begin(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long) bh->b_blocknr,
+ !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
index = ocfs2_search_cache_array(ci, bh->b_blocknr);
@@ -261,7 +260,7 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_unlock(ci);
- mlog(0, "index = %d, item = %p\n", index, item);
+ trace_ocfs2_buffer_cached_end(index, item);
return (index != -1) || (item != NULL);
}
@@ -306,8 +305,9 @@ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
{
BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
- mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
- ci->ci_num_cached);
+ trace_ocfs2_append_cache_array(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, ci->ci_num_cached);
ci->ci_cache.ci_array[ci->ci_num_cached] = block;
ci->ci_num_cached++;
@@ -324,8 +324,9 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
struct ocfs2_meta_cache_item *tmp;
- mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block,
- ci->ci_num_cached);
+ trace_ocfs2_insert_cache_tree(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, ci->ci_num_cached);
while(*p) {
parent = *p;
@@ -389,9 +390,9 @@ static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
tree[i] = NULL;
}
- mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- ci->ci_flags, ci->ci_num_cached);
+ trace_ocfs2_expand_cache(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ ci->ci_flags, ci->ci_num_cached);
}
/* Slow path function - memory allocation is necessary. See the
@@ -405,9 +406,9 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
{ NULL, };
- mlog(0, "Owner %llu, block %llu, expand = %d\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)block, expand_tree);
+ trace_ocfs2_set_buffer_uptodate(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, expand_tree);
new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
@@ -433,7 +434,6 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_lock(ci);
if (ocfs2_insert_can_use_array(ci)) {
- mlog(0, "Someone cleared the tree underneath us\n");
/* Ok, items were removed from the cache in between
* locks. Detect this and revert back to the fast path */
ocfs2_append_cache_array(ci, block);
@@ -490,9 +490,9 @@ void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
if (ocfs2_buffer_cached(ci, bh))
return;
- mlog(0, "Owner %llu, inserting block %llu\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_set_buffer_uptodate_begin(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)bh->b_blocknr);
/* No need to recheck under spinlock - insertion is guarded by
* co_io_lock() */
@@ -542,8 +542,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
BUG_ON(index >= ci->ci_num_cached);
BUG_ON(!ci->ci_num_cached);
- mlog(0, "remove index %d (num_cached = %u\n", index,
- ci->ci_num_cached);
+ trace_ocfs2_remove_metadata_array(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ index, ci->ci_num_cached);
ci->ci_num_cached--;
@@ -559,8 +560,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *item)
{
- mlog(0, "remove block %llu from tree\n",
- (unsigned long long) item->c_block);
+ trace_ocfs2_remove_metadata_tree(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)item->c_block);
rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
ci->ci_num_cached--;
@@ -573,10 +575,10 @@ static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *item = NULL;
ocfs2_metadata_cache_lock(ci);
- mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long) block, ci->ci_num_cached,
- ci->ci_flags & OCFS2_CACHE_FL_INLINE);
+ trace_ocfs2_remove_block_from_cache(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long) block, ci->ci_num_cached,
+ ci->ci_flags);
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
index = ocfs2_search_cache_array(ci, block);
@@ -626,9 +628,6 @@ int __init init_ocfs2_uptodate_cache(void)
if (!ocfs2_uptodate_cachep)
return -ENOMEM;
- mlog(0, "%u inlined cache items per inode.\n",
- OCFS2_CACHE_INFO_MAX_ARRAY);
-
return 0;
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6bb6024..81ecf9c 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -37,7 +37,6 @@
#include <linux/string.h>
#include <linux/security.h>
-#define MLOG_MASK_PREFIX ML_XATTR
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -57,6 +56,7 @@
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
+#include "ocfs2_trace.h"
struct ocfs2_xattr_def_value_root {
struct ocfs2_xattr_value_root xv;
@@ -474,8 +474,7 @@ static int ocfs2_validate_xattr_block(struct super_block *sb,
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)bh->b_data;
- mlog(0, "Validating xattr block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -715,11 +714,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
struct ocfs2_extent_tree et;
- mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);
-
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
while (clusters_to_add) {
+ trace_ocfs2_xattr_extend_allocation(clusters_to_add);
+
status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
@@ -754,8 +753,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
*/
BUG_ON(why == RESTART_META);
- mlog(0, "restarting xattr value extension for %u"
- " clusters,.\n", clusters_to_add);
credits = ocfs2_calc_extend_credits(inode->i_sb,
&vb->vb_xv->xr_list,
clusters_to_add);
@@ -3246,8 +3243,8 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
}
meta_add += extra_meta;
- mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
- "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
+ trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
+ clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -3557,7 +3554,7 @@ int ocfs2_xattr_set(struct inode *inode,
down_write(&OCFS2_I(inode)->ip_xattr_sem);
/*
* Scan inode and external block to find the same name
- * extended attribute and collect search infomation.
+ * extended attribute and collect search information.
*/
ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
if (ret)
@@ -3581,7 +3578,7 @@ int ocfs2_xattr_set(struct inode *inode,
goto cleanup;
}
- /* Check whether the value is refcounted and do some prepartion. */
+ /* Check whether the value is refcounted and do some preparation. */
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
(!xis.not_found || !xbs.not_found)) {
ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
@@ -3887,8 +3884,10 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (found) {
xs->here = &xs->header->xh_entries[index];
- mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name,
- (unsigned long long)bucket_blkno(xs->bucket), index);
+ trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
+ name, name_index, name_hash,
+ (unsigned long long)bucket_blkno(xs->bucket),
+ index);
} else
ret = -ENODATA;
@@ -3915,8 +3914,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
if (le16_to_cpu(el->l_next_free_rec) == 0)
return -ENODATA;
- mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n",
- name, name_hash, name_index);
+ trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
+ name, name_index, name_hash,
+ (unsigned long long)root_bh->b_blocknr,
+ -1);
ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
&num_clusters, el);
@@ -3927,9 +3928,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
- mlog(0, "find xattr extent rec %u clusters from %llu, the first hash "
- "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno,
- first_hash);
+ trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
+ name, name_index, first_hash,
+ (unsigned long long)p_blkno,
+ num_clusters);
ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
p_blkno, first_hash, num_clusters, xs);
@@ -3955,8 +3957,9 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
return -ENOMEM;
}
- mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n",
- clusters, (unsigned long long)blkno);
+ trace_ocfs2_iterate_xattr_buckets(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno, clusters);
for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
ret = ocfs2_read_xattr_bucket(bucket, blkno);
@@ -3972,8 +3975,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
if (i == 0)
num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
- mlog(0, "iterating xattr bucket %llu, first hash %u\n",
- (unsigned long long)blkno,
+ trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
if (func) {
ret = func(inode, bucket, para);
@@ -4173,9 +4175,9 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
char *src = xb_bh->b_data;
char *target = bucket_block(bucket, blks - 1);
- mlog(0, "cp xattr from block %llu to bucket %llu\n",
- (unsigned long long)xb_bh->b_blocknr,
- (unsigned long long)bucket_blkno(bucket));
+ trace_ocfs2_cp_xattr_block_to_bucket_begin(
+ (unsigned long long)xb_bh->b_blocknr,
+ (unsigned long long)bucket_blkno(bucket));
for (i = 0; i < blks; i++)
memset(bucket_block(bucket, i), 0, blocksize);
@@ -4211,8 +4213,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
for (i = 0; i < count; i++)
le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
- mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n",
- offset, size, off_change);
+ trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
cmp_xe, swap_xe);
@@ -4261,8 +4262,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
struct ocfs2_xattr_tree_root *xr;
u16 xb_flags = le16_to_cpu(xb->xb_flags);
- mlog(0, "create xattr index block for %llu\n",
- (unsigned long long)xb_bh->b_blocknr);
+ trace_ocfs2_xattr_create_index_block_begin(
+ (unsigned long long)xb_bh->b_blocknr);
BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
BUG_ON(!xs->bucket);
@@ -4295,8 +4296,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
*/
blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
- mlog(0, "allocate 1 cluster from %llu to xattr block\n",
- (unsigned long long)blkno);
+ trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
if (ret) {
@@ -4400,8 +4400,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
entries = (char *)xh->xh_entries;
xh_free_start = le16_to_cpu(xh->xh_free_start);
- mlog(0, "adjust xattr bucket in %llu, count = %u, "
- "xh_free_start = %u, xh_name_value_len = %u.\n",
+ trace_ocfs2_defrag_xattr_bucket(
(unsigned long long)blkno, le16_to_cpu(xh->xh_count),
xh_free_start, le16_to_cpu(xh->xh_name_value_len));
@@ -4503,8 +4502,9 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
- mlog(0, "move half of xattrs in cluster %llu to %llu\n",
- (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno);
+ trace_ocfs2_mv_xattr_bucket_cross_cluster(
+ (unsigned long long)last_cluster_blkno,
+ (unsigned long long)new_blkno);
ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
last_cluster_blkno, new_blkno,
@@ -4614,8 +4614,8 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
struct ocfs2_xattr_entry *xe;
int blocksize = inode->i_sb->s_blocksize;
- mlog(0, "move some of xattrs from bucket %llu to %llu\n",
- (unsigned long long)blk, (unsigned long long)new_blk);
+ trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
+ (unsigned long long)new_blk);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4714,9 +4714,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
*/
xe = &xh->xh_entries[start];
len = sizeof(struct ocfs2_xattr_entry) * (count - start);
- mlog(0, "mv xattr entry len %d from %d to %d\n", len,
- (int)((char *)xe - (char *)xh),
- (int)((char *)xh->xh_entries - (char *)xh));
+ trace_ocfs2_divide_xattr_bucket_move(len,
+ (int)((char *)xe - (char *)xh),
+ (int)((char *)xh->xh_entries - (char *)xh));
memmove((char *)xh->xh_entries, (char *)xe, len);
xe = &xh->xh_entries[count - start];
len = sizeof(struct ocfs2_xattr_entry) * start;
@@ -4788,9 +4788,9 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
BUG_ON(s_blkno == t_blkno);
- mlog(0, "cp bucket %llu to %llu, target is %d\n",
- (unsigned long long)s_blkno, (unsigned long long)t_blkno,
- t_is_new);
+ trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
+ (unsigned long long)t_blkno,
+ t_is_new);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4862,8 +4862,8 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
struct ocfs2_xattr_bucket *old_first, *new_first;
- mlog(0, "mv xattrs from cluster %llu to %llu\n",
- (unsigned long long)last_blk, (unsigned long long)to_blk);
+ trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
+ (unsigned long long)to_blk);
BUG_ON(start_bucket >= num_buckets);
if (start_bucket) {
@@ -5013,9 +5013,9 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
{
int ret;
- mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n",
- (unsigned long long)bucket_blkno(first), prev_clusters,
- (unsigned long long)new_blk);
+ trace_ocfs2_adjust_xattr_cross_cluster(
+ (unsigned long long)bucket_blkno(first),
+ (unsigned long long)new_blk, prev_clusters);
if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
@@ -5088,10 +5088,10 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
- mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, "
- "previous xattr blkno = %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- prev_cpos, (unsigned long long)bucket_blkno(first));
+ trace_ocfs2_add_new_xattr_cluster_begin(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)bucket_blkno(first),
+ prev_cpos, prev_clusters);
ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
@@ -5113,8 +5113,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
BUG_ON(num_bits > clusters_to_add);
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n",
- num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
(prev_clusters + num_bits) << osb->s_clustersize_bits <=
@@ -5130,8 +5129,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
*/
v_start = prev_cpos + prev_clusters;
*num_clusters = prev_clusters + num_bits;
- mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
- num_bits);
} else {
ret = ocfs2_adjust_xattr_cross_cluster(inode,
handle,
@@ -5147,8 +5144,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
}
}
- mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
- num_bits, (unsigned long long)block, v_start);
+ trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
+ v_start, num_bits);
ret = ocfs2_insert_extent(handle, &et, v_start, block,
num_bits, 0, ctxt->meta_ac);
if (ret < 0) {
@@ -5183,9 +5180,9 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode,
u64 end_blk;
u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
- mlog(0, "extend xattr bucket in %llu, xattr extend rec starting "
- "from %llu, len = %u\n", (unsigned long long)target_blk,
- (unsigned long long)bucket_blkno(first), num_clusters);
+ trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
+ (unsigned long long)bucket_blkno(first),
+ num_clusters, new_bucket);
/* The extent must have room for an additional bucket */
BUG_ON(new_bucket >=
@@ -5265,8 +5262,8 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode,
/* The bucket at the front of the extent */
struct ocfs2_xattr_bucket *first;
- mlog(0, "Add new xattr bucket starting from %llu\n",
- (unsigned long long)bucket_blkno(target));
+ trace_ocfs2_add_new_xattr_bucket(
+ (unsigned long long)bucket_blkno(target));
/* The first bucket of the original extent */
first = ocfs2_xattr_bucket_new(inode);
@@ -5382,8 +5379,8 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
* modified something. We have to assume they did, and dirty
* the whole bucket. This leaves us in a consistent state.
*/
- mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
- xe_off, (unsigned long long)bucket_blkno(bucket), len);
+ trace_ocfs2_xattr_bucket_value_truncate(
+ (unsigned long long)bucket_blkno(bucket), xe_off, len);
ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
if (ret) {
mlog_errno(ret);
@@ -5433,8 +5430,9 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
ocfs2_init_dealloc_ctxt(&dealloc);
- mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
- cpos, len, (unsigned long long)blkno);
+ trace_ocfs2_rm_xattr_cluster(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno, cpos, len);
ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
len);
@@ -5538,7 +5536,7 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
int ret;
struct ocfs2_xa_loc loc;
- mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
+ trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
xs->not_found ? NULL : xs->here);
@@ -5570,7 +5568,6 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
out:
- mlog_exit(ret);
return ret;
}
@@ -5581,7 +5578,7 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
{
int ret;
- mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
+ trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
if (!ret)
@@ -5637,7 +5634,6 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
mlog_errno(ret);
out:
- mlog_exit(ret);
return ret;
}
@@ -6041,9 +6037,9 @@ static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
p = &refcount;
- mlog(0, "refcount bucket %llu, count = %u\n",
- (unsigned long long)bucket_blkno(bucket),
- le16_to_cpu(xh->xh_count));
+ trace_ocfs2_xattr_bucket_value_refcount(
+ (unsigned long long)bucket_blkno(bucket),
+ le16_to_cpu(xh->xh_count));
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
@@ -6339,8 +6335,8 @@ static int ocfs2_reflink_xattr_header(handle_t *handle,
u32 clusters, cpos, p_cluster, num_clusters;
unsigned int ext_flags = 0;
- mlog(0, "reflink xattr in container %llu, count = %u\n",
- (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count));
+ trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
+ le16_to_cpu(xh->xh_count));
last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
@@ -6540,8 +6536,8 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
goto out;
}
- mlog(0, "create new xattr block for inode %llu, index = %d\n",
- (unsigned long long)fe_bh->b_blocknr, indexed);
+ trace_ocfs2_create_empty_xattr_block(
+ (unsigned long long)fe_bh->b_blocknr, indexed);
ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
ret_bh);
if (ret)
@@ -6952,8 +6948,8 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
if (ret)
mlog_errno(ret);
- mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
- (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+ trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
+ num_clusters, reflink_cpos);
len -= num_clusters;
blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
@@ -6982,8 +6978,7 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_extent_tree et;
- mlog(0, "reflink xattr buckets %llu len %u\n",
- (unsigned long long)blkno, len);
+ trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
ocfs2_init_xattr_tree_extent_tree(&et,
INODE_CACHE(args->reflink->new_inode),
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 8a6d34f..d738a7e 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
.readpages = omfs_readpages,
.writepage = omfs_writepage,
.writepages = omfs_writepages,
- .sync_page = block_sync_page,
.write_begin = omfs_write_begin,
.write_end = generic_write_end,
.bmap = omfs_bmap,
diff --git a/fs/open.c b/fs/open.c
index f83ca80..b52cf01 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -835,17 +835,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
validate_creds(cred);
- /*
- * We must always pass in a valid mount pointer. Historically
- * callers got away with not passing it, but we must enforce this at
- * the earliest possible point now to avoid strange problems deep in the
- * filesystem stack.
- */
- if (!mnt) {
- printk(KERN_WARNING "%s called with NULL vfsmount\n", __func__);
- dump_stack();
- return ERR_PTR(-EINVAL);
- }
+ /* We must always pass in a valid mount pointer. */
+ BUG_ON(!mnt);
error = -ENFILE;
f = get_empty_filp();
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 9c21119..d545e97 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -290,7 +290,8 @@ ssize_t part_inflight_show(struct device *dev,
{
struct hd_struct *p = dev_to_part(dev);
- return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
+ return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
+ atomic_read(&p->in_flight[1]));
}
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -499,7 +500,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
- /* suppress uevent if the disk supresses it */
+ /* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
@@ -584,7 +585,7 @@ rescan:
/*
* If any partition code tried to read beyond EOD, try
* unlocking native capacity even if partition table is
- * sucessfully read as we could be missing some partitions.
+ * successfully read as we could be missing some partitions.
*/
if (state->access_beyond_eod) {
printk(KERN_WARNING
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index ea648b9..410df07 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
BUG_ON (!data || !frags);
+ if (size < 2 * VBLK_SIZE_HEAD) {
+ ldm_error("Value of size is too small.");
+ return false;
+ }
+
group = get_unaligned_be32(data + 0x08);
rec = get_unaligned_be16(data + 0x0C);
num = get_unaligned_be16(data + 0x0E);
@@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
ldm_error ("A VBLK claims to have %d parts.", num);
return false;
}
+ if (rec >= num) {
+ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+ return false;
+ }
list_for_each (item, frags) {
f = list_entry (item, struct frag, list);
@@ -1334,10 +1343,9 @@ found:
f->map |= (1 << rec);
- if (num > 0) {
- data += VBLK_SIZE_HEAD;
- size -= VBLK_SIZE_HEAD;
- }
+ data += VBLK_SIZE_HEAD;
+ size -= VBLK_SIZE_HEAD;
+
memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
return true;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7c99c1c..5e4f776 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
vsize,
mm ? get_mm_rss(mm) : 0,
rsslim,
- mm ? mm->start_code : 0,
- mm ? mm->end_code : 0,
+ mm ? (permitted ? mm->start_code : 1) : 0,
+ mm ? (permitted ? mm->end_code : 1) : 0,
(permitted && mm) ? mm->start_stack : 0,
esp,
eip,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d49c4b5..dfa5327 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -191,17 +191,20 @@ static int proc_root_link(struct inode *inode, struct path *path)
return result;
}
-/*
- * Return zero if current may access user memory in @task, -error if not.
- */
-static int check_mem_permission(struct task_struct *task)
+static struct mm_struct *__check_mem_permission(struct task_struct *task)
{
+ struct mm_struct *mm;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ return ERR_PTR(-EINVAL);
+
/*
* A task can always look at itself, in case it chooses
* to use system calls instead of load instructions.
*/
if (task == current)
- return 0;
+ return mm;
/*
* If current is actively ptrace'ing, and would also be
@@ -213,27 +216,53 @@ static int check_mem_permission(struct task_struct *task)
match = (tracehook_tracer_task(task) == current);
rcu_read_unlock();
if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
- return 0;
+ return mm;
}
/*
- * Noone else is allowed.
+ * No one else is allowed.
+ */
+ mmput(mm);
+ return ERR_PTR(-EPERM);
+}
+
+/*
+ * If current may access user memory in @task return a reference to the
+ * corresponding mm, otherwise ERR_PTR.
+ */
+static struct mm_struct *check_mem_permission(struct task_struct *task)
+{
+ struct mm_struct *mm;
+ int err;
+
+ /*
+ * Avoid racing if task exec's as we might get a new mm but validate
+ * against old credentials.
*/
- return -EPERM;
+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ mm = __check_mem_permission(task);
+ mutex_unlock(&task->signal->cred_guard_mutex);
+
+ return mm;
}
struct mm_struct *mm_for_maps(struct task_struct *task)
{
struct mm_struct *mm;
+ int err;
- if (mutex_lock_killable(&task->signal->cred_guard_mutex))
- return NULL;
+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return ERR_PTR(err);
mm = get_task_mm(task);
if (mm && mm != current->mm &&
!ptrace_may_access(task, PTRACE_MODE_READ)) {
mmput(mm);
- mm = NULL;
+ mm = ERR_PTR(-EACCES);
}
mutex_unlock(&task->signal->cred_guard_mutex);
@@ -279,9 +308,9 @@ out:
static int proc_pid_auxv(struct task_struct *task, char *buffer)
{
- int res = 0;
- struct mm_struct *mm = get_task_mm(task);
- if (mm) {
+ struct mm_struct *mm = mm_for_maps(task);
+ int res = PTR_ERR(mm);
+ if (mm && !IS_ERR(mm)) {
unsigned int nwords = 0;
do {
nwords += 2;
@@ -318,6 +347,23 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
}
#endif /* CONFIG_KALLSYMS */
+static int lock_trace(struct task_struct *task)
+{
+ int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return err;
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ return -EPERM;
+ }
+ return 0;
+}
+
+static void unlock_trace(struct task_struct *task)
+{
+ mutex_unlock(&task->signal->cred_guard_mutex);
+}
+
#ifdef CONFIG_STACKTRACE
#define MAX_STACK_TRACE_DEPTH 64
@@ -327,6 +373,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
{
struct stack_trace trace;
unsigned long *entries;
+ int err;
int i;
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
@@ -337,15 +384,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
trace.max_entries = MAX_STACK_TRACE_DEPTH;
trace.entries = entries;
trace.skip = 0;
- save_stack_trace_tsk(task, &trace);
- for (i = 0; i < trace.nr_entries; i++) {
- seq_printf(m, "[<%p>] %pS\n",
- (void *)entries[i], (void *)entries[i]);
+ err = lock_trace(task);
+ if (!err) {
+ save_stack_trace_tsk(task, &trace);
+
+ for (i = 0; i < trace.nr_entries; i++) {
+ seq_printf(m, "[<%pK>] %pS\n",
+ (void *)entries[i], (void *)entries[i]);
+ }
+ unlock_trace(task);
}
kfree(entries);
- return 0;
+ return err;
}
#endif
@@ -508,18 +560,22 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
long nr;
unsigned long args[6], sp, pc;
+ int res = lock_trace(task);
+ if (res)
+ return res;
if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
- return sprintf(buffer, "running\n");
-
- if (nr < 0)
- return sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
-
- return sprintf(buffer,
+ res = sprintf(buffer, "running\n");
+ else if (nr < 0)
+ res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+ else
+ res = sprintf(buffer,
"%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
nr,
args[0], args[1], args[2], args[3], args[4], args[5],
sp, pc);
+ unlock_trace(task);
+ return res;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
@@ -775,18 +831,14 @@ static ssize_t mem_read(struct file * file, char __user * buf,
if (!task)
goto out_no_task;
- if (check_mem_permission(task))
- goto out;
-
ret = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
- ret = 0;
-
- mm = get_task_mm(task);
- if (!mm)
+ mm = check_mem_permission(task);
+ ret = PTR_ERR(mm);
+ if (IS_ERR(mm))
goto out_free;
ret = -EIO;
@@ -800,8 +852,8 @@ static ssize_t mem_read(struct file * file, char __user * buf,
int this_len, retval;
this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- retval = access_process_vm(task, src, page, this_len, 0);
- if (!retval || check_mem_permission(task)) {
+ retval = access_remote_vm(mm, src, page, this_len, 0);
+ if (!retval) {
if (!ret)
ret = -EIO;
break;
@@ -829,10 +881,6 @@ out_no_task:
return ret;
}
-#define mem_write NULL
-
-#ifndef mem_write
-/* This is a security hazard */
static ssize_t mem_write(struct file * file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -840,18 +888,25 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
char *page;
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
unsigned long dst = *ppos;
+ struct mm_struct *mm;
copied = -ESRCH;
if (!task)
goto out_no_task;
- if (check_mem_permission(task))
- goto out;
+ mm = check_mem_permission(task);
+ copied = PTR_ERR(mm);
+ if (IS_ERR(mm))
+ goto out_task;
+
+ copied = -EIO;
+ if (file->private_data != (void *)((long)current->self_exec_id))
+ goto out_mm;
copied = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
- goto out;
+ goto out_mm;
copied = 0;
while (count > 0) {
@@ -862,7 +917,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
copied = -EFAULT;
break;
}
- retval = access_process_vm(task, dst, page, this_len, 1);
+ retval = access_remote_vm(mm, dst, page, this_len, 1);
if (!retval) {
if (!copied)
copied = -EIO;
@@ -875,12 +930,13 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
}
*ppos = dst;
free_page((unsigned long) page);
-out:
+out_mm:
+ mmput(mm);
+out_task:
put_task_struct(task);
out_no_task:
return copied;
}
-#endif
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
@@ -917,20 +973,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
if (!task)
goto out_no_task;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
ret = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
- ret = 0;
- mm = get_task_mm(task);
- if (!mm)
+ mm = mm_for_maps(task);
+ ret = PTR_ERR(mm);
+ if (!mm || IS_ERR(mm))
goto out_free;
+ ret = 0;
while (count > 0) {
int this_len, retval, max_len;
@@ -2748,8 +2802,12 @@ static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- seq_printf(m, "%08x\n", task->personality);
- return 0;
+ int err = lock_trace(task);
+ if (!err) {
+ seq_printf(m, "%08x\n", task->personality);
+ unlock_trace(task);
+ }
+ return err;
}
/*
@@ -2768,7 +2826,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
- ONE("personality", S_IRUSR, proc_pid_personality),
+ ONE("personality", S_IRUGO, proc_pid_personality),
INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
@@ -2778,7 +2836,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
- INF("syscall", S_IRUSR, proc_pid_syscall),
+ INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
ONE("stat", S_IRUGO, proc_tgid_stat),
@@ -2797,7 +2855,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_smaps_operations),
- REG("pagemap", S_IRUSR, proc_pagemap_operations),
+ REG("pagemap", S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -2806,7 +2864,7 @@ static const struct pid_entry tgid_base_stuff[] = {
INF("wchan", S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
- ONE("stack", S_IRUSR, proc_pid_stack),
+ ONE("stack", S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3066,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+ unsigned int nr;
+ struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
+ if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+ goto out_no_task;
+ nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+ reaper = get_proc_task(filp->f_path.dentry->d_inode);
if (!reaper)
goto out_no_task;
@@ -3108,14 +3171,14 @@ static const struct pid_entry tid_base_stuff[] = {
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
- ONE("personality", S_IRUSR, proc_pid_personality),
+ ONE("personality", S_IRUGO, proc_pid_personality),
INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
- INF("syscall", S_IRUSR, proc_pid_syscall),
+ INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
ONE("stat", S_IRUGO, proc_tid_stat),
@@ -3133,7 +3196,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_smaps_operations),
- REG("pagemap", S_IRUSR, proc_pagemap_operations),
+ REG("pagemap", S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -3142,7 +3205,7 @@ static const struct pid_entry tid_base_stuff[] = {
INF("wchan", S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
- ONE("stack", S_IRUSR, proc_pid_stack),
+ ONE("stack", S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3161,7 +3224,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
- REG("sessionid", S_IRUSR, proc_sessionid_operations),
+ REG("sessionid", S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 01e07f2..f1281339 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -28,7 +28,7 @@
DEFINE_SPINLOCK(proc_subdir_lock);
-static int proc_match(int len, const char *name, struct proc_dir_entry *de)
+static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
if (de->namelen != len)
return 0;
@@ -303,7 +303,7 @@ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
{
const char *cp = name, *next;
struct proc_dir_entry *de;
- int len;
+ unsigned int len;
de = *ret;
if (!de)
@@ -602,7 +602,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
- int len;
+ unsigned int len;
/* make sure name is valid */
if (!name || !strlen(name)) goto out;
@@ -786,7 +786,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
struct proc_dir_entry **p;
struct proc_dir_entry *de = NULL;
const char *fn = name;
- int len;
+ unsigned int len;
spin_lock(&proc_subdir_lock);
if (__xlate_proc_name(name, &parent, &fn) != 0) {
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d6a7ca1..d15aa1b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -46,8 +46,6 @@ static void proc_evict_inode(struct inode *inode)
}
}
-struct vfsmount *proc_mnt;
-
static struct kmem_cache * proc_inode_cachep;
static struct inode *proc_alloc_inode(struct super_block *sb)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9ad561d..c03e8d3 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -107,7 +107,6 @@ static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
}
void pde_put(struct proc_dir_entry *pde);
-extern struct vfsmount *proc_mnt;
int proc_fill_super(struct super_block *);
struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ef9fa8e..a9000e9 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -43,17 +43,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
struct pid_namespace *ns;
struct proc_inode *ei;
- if (proc_mnt) {
- /* Seed the root directory with a pid so it doesn't need
- * to be special in base.c. I would do this earlier but
- * the only task alive when /proc is mounted the first time
- * is the init_task and it doesn't have any pids.
- */
- ei = PROC_I(proc_mnt->mnt_sb->s_root->d_inode);
- if (!ei->pid)
- ei->pid = find_get_pid(1);
- }
-
if (flags & MS_KERNMOUNT)
ns = (struct pid_namespace *)data;
else
@@ -71,16 +60,16 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
return ERR_PTR(err);
}
- ei = PROC_I(sb->s_root->d_inode);
- if (!ei->pid) {
- rcu_read_lock();
- ei->pid = get_pid(find_pid_ns(1, ns));
- rcu_read_unlock();
- }
-
sb->s_flags |= MS_ACTIVE;
}
+ ei = PROC_I(sb->s_root->d_inode);
+ if (!ei->pid) {
+ rcu_read_lock();
+ ei->pid = get_pid(find_pid_ns(1, ns));
+ rcu_read_unlock();
+ }
+
return dget(sb->s_root);
}
@@ -101,19 +90,20 @@ static struct file_system_type proc_fs_type = {
void __init proc_root_init(void)
{
+ struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
- proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
- if (IS_ERR(proc_mnt)) {
+ mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
+ if (IS_ERR(mnt)) {
unregister_filesystem(&proc_fs_type);
return;
}
- init_pid_ns.proc_mnt = proc_mnt;
+ init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b9148..2e7addf 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
@@ -7,6 +8,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
+#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -119,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
- return NULL;
+ return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
- if (!mm)
- return NULL;
+ if (!mm || IS_ERR(mm))
+ return mm;
down_read(&mm->mmap_sem);
- tail_vma = get_gate_vma(priv->task);
+ tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
/* Start with last addr hint */
@@ -180,7 +182,8 @@ static void m_stop(struct seq_file *m, void *v)
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
- vma_stop(priv, vma);
+ if (!IS_ERR(vma))
+ vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
@@ -249,8 +252,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
const char *name = arch_vma_name(vma);
if (!name) {
if (mm) {
- if (vma->vm_start <= mm->start_brk &&
- vma->vm_end >= mm->brk) {
+ if (vma->vm_start <= mm->brk &&
+ vma->vm_end >= mm->start_brk) {
name = "[heap]";
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
@@ -277,7 +280,8 @@ static int show_map(struct seq_file *m, void *v)
show_map_vma(m, vma);
if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+ m->version = (vma != get_gate_vma(task->mm))
+ ? vma->vm_start : 0;
return 0;
}
@@ -329,58 +333,86 @@ struct mem_size_stats {
unsigned long private_dirty;
unsigned long referenced;
unsigned long anonymous;
+ unsigned long anonymous_thp;
unsigned long swap;
u64 pss;
};
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+ unsigned long ptent_size, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
- pte_t *pte, ptent;
- spinlock_t *ptl;
struct page *page;
int mapcount;
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- for (; addr != end; pte++, addr += PAGE_SIZE) {
- ptent = *pte;
-
- if (is_swap_pte(ptent)) {
- mss->swap += PAGE_SIZE;
- continue;
- }
+ if (is_swap_pte(ptent)) {
+ mss->swap += ptent_size;
+ return;
+ }
- if (!pte_present(ptent))
- continue;
+ if (!pte_present(ptent))
+ return;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ return;
+
+ if (PageAnon(page))
+ mss->anonymous += ptent_size;
+
+ mss->resident += ptent_size;
+ /* Accumulate the size in pages that have been accessed. */
+ if (pte_young(ptent) || PageReferenced(page))
+ mss->referenced += ptent_size;
+ mapcount = page_mapcount(page);
+ if (mapcount >= 2) {
+ if (pte_dirty(ptent) || PageDirty(page))
+ mss->shared_dirty += ptent_size;
+ else
+ mss->shared_clean += ptent_size;
+ mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+ } else {
+ if (pte_dirty(ptent) || PageDirty(page))
+ mss->private_dirty += ptent_size;
+ else
+ mss->private_clean += ptent_size;
+ mss->pss += (ptent_size << PSS_SHIFT);
+ }
+}
- page = vm_normal_page(vma, addr, ptent);
- if (!page)
- continue;
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = mss->vma;
+ pte_t *pte;
+ spinlock_t *ptl;
- if (PageAnon(page))
- mss->anonymous += PAGE_SIZE;
-
- mss->resident += PAGE_SIZE;
- /* Accumulate the size in pages that have been accessed. */
- if (pte_young(ptent) || PageReferenced(page))
- mss->referenced += PAGE_SIZE;
- mapcount = page_mapcount(page);
- if (mapcount >= 2) {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->shared_dirty += PAGE_SIZE;
- else
- mss->shared_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+ spin_lock(&walk->mm->page_table_lock);
+ if (pmd_trans_huge(*pmd)) {
+ if (pmd_trans_splitting(*pmd)) {
+ spin_unlock(&walk->mm->page_table_lock);
+ wait_split_huge_page(vma->anon_vma, pmd);
} else {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->private_dirty += PAGE_SIZE;
- else
- mss->private_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT);
+ smaps_pte_entry(*(pte_t *)pmd, addr,
+ HPAGE_PMD_SIZE, walk);
+ spin_unlock(&walk->mm->page_table_lock);
+ mss->anonymous_thp += HPAGE_PMD_SIZE;
+ return 0;
}
+ } else {
+ spin_unlock(&walk->mm->page_table_lock);
}
+ /*
+ * The mmap_sem held all the way back in m_start() is what
+ * keeps khugepaged out of here and from collapsing things
+ * in here.
+ */
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE)
+ smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
@@ -416,6 +448,7 @@ static int show_smap(struct seq_file *m, void *v)
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
+ "AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
@@ -429,6 +462,7 @@ static int show_smap(struct seq_file *m, void *v)
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
+ mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
@@ -436,7 +470,8 @@ static int show_smap(struct seq_file *m, void *v)
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ m->version = (vma != get_gate_vma(task->mm))
+ ? vma->vm_start : 0;
return 0;
}
@@ -467,6 +502,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
spinlock_t *ptl;
struct page *page;
+ split_huge_page_pmd(walk->mm, pmd);
+
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
@@ -623,6 +660,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t *pte;
int err = 0;
+ split_huge_page_pmd(walk->mm, pmd);
+
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
for (; addr != end; addr += PAGE_SIZE) {
@@ -728,8 +767,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!task)
goto out;
- ret = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ mm = mm_for_maps(task);
+ ret = PTR_ERR(mm);
+ if (!mm || IS_ERR(mm))
goto out_task;
ret = -EINVAL;
@@ -742,10 +782,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!count)
goto out_task;
- mm = get_task_mm(task);
- if (!mm)
- goto out_task;
-
pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index b535d3e..980de54 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -199,13 +199,13 @@ static void *m_start(struct seq_file *m, loff_t *pos)
/* pin the task and mm whilst we play with them */
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
- return NULL;
+ return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
- if (!mm) {
+ if (!mm || IS_ERR(mm)) {
put_task_struct(priv->task);
priv->task = NULL;
- return NULL;
+ return mm;
}
down_read(&mm->mmap_sem);
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 867d0ac..8007ae7 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,5 +1,5 @@
config PSTORE
- bool "Persistant store support"
+ bool "Persistent store support"
default n
help
This option enables generic access to platform level
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 0834223..977ed27 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -27,6 +27,7 @@
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
+#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/pstore.h>
@@ -73,11 +74,16 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
struct pstore_private *p = dentry->d_inode->i_private;
p->erase(p->id);
- kfree(p);
return simple_unlink(dir, dentry);
}
+static void pstore_evict_inode(struct inode *inode)
+{
+ end_writeback(inode);
+ kfree(inode->i_private);
+}
+
static const struct inode_operations pstore_dir_inode_operations = {
.lookup = simple_lookup,
.unlink = pstore_unlink,
@@ -107,9 +113,52 @@ static struct inode *pstore_get_inode(struct super_block *sb,
return inode;
}
+enum {
+ Opt_kmsg_bytes, Opt_err
+};
+
+static const match_table_t tokens = {
+ {Opt_kmsg_bytes, "kmsg_bytes=%u"},
+ {Opt_err, NULL}
+};
+
+static void parse_options(char *options)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+
+ if (!options)
+ return;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_kmsg_bytes:
+ if (!match_int(&args[0], &option))
+ pstore_set_kmsg_bytes(option);
+ break;
+ }
+ }
+}
+
+static int pstore_remount(struct super_block *sb, int *flags, char *data)
+{
+ parse_options(data);
+
+ return 0;
+}
+
static const struct super_operations pstore_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
+ .evict_inode = pstore_evict_inode,
+ .remount_fs = pstore_remount,
.show_options = generic_show_options,
};
@@ -209,6 +258,8 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &pstore_ops;
sb->s_time_gran = 1;
+ parse_options(data);
+
inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0);
if (!inode) {
err = -ENOMEM;
@@ -252,28 +303,7 @@ static struct file_system_type pstore_fs_type = {
static int __init init_pstore_fs(void)
{
- int rc = 0;
- struct kobject *pstorefs_kobj;
-
- pstorefs_kobj = kobject_create_and_add("pstore", fs_kobj);
- if (!pstorefs_kobj) {
- rc = -ENOMEM;
- goto done;
- }
-
- rc = sysfs_create_file(pstorefs_kobj, &pstore_kmsg_bytes_attr.attr);
- if (rc)
- goto done1;
-
- rc = register_filesystem(&pstore_fs_type);
- if (rc == 0)
- goto done;
-
- sysfs_remove_file(pstorefs_kobj, &pstore_kmsg_bytes_attr.attr);
-done1:
- kobject_put(pstorefs_kobj);
-done:
- return rc;
+ return register_filesystem(&pstore_fs_type);
}
module_init(init_pstore_fs)
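
With the sysfs attribute gone (see the init_pstore_fs() simplification above), kmsg_bytes is configured at mount or remount time through the new option parser. A hypothetical userspace sketch, not part of the patch, assuming the conventional /sys/fs/pstore mount point:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* equivalent to: mount -t pstore -o kmsg_bytes=16384 pstore /sys/fs/pstore */
	if (mount("pstore", "/sys/fs/pstore", "pstore", 0, "kmsg_bytes=16384"))
		perror("mount");
	/* remounting re-parses the options via pstore_remount() */
	if (mount(NULL, "/sys/fs/pstore", NULL, MS_REMOUNT, "kmsg_bytes=8192"))
		perror("remount");
	return 0;
}
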
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 76c26d2..8c9f23e 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -1,7 +1,6 @@
+extern void pstore_set_kmsg_bytes(int);
extern void pstore_get_records(void);
extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
char *data, size_t size,
struct timespec time, int (*erase)(u64));
extern int pstore_is_mounted(void);
-
-extern struct kobj_attribute pstore_kmsg_bytes_attr;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 705fdf8..f835a25 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -37,27 +37,21 @@
static DEFINE_SPINLOCK(pstore_lock);
static struct pstore_info *psinfo;
-/* How much of the console log to snapshot. /sys/fs/pstore/kmsg_bytes */
+/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;
-static ssize_t b_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+void pstore_set_kmsg_bytes(int bytes)
{
- return snprintf(buf, PAGE_SIZE, "%lu\n", kmsg_bytes);
+ kmsg_bytes = bytes;
}
-static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- return (sscanf(buf, "%lu", &kmsg_bytes) > 0) ? count : 0;
-}
-
-struct kobj_attribute pstore_kmsg_bytes_attr =
- __ATTR(kmsg_bytes, S_IRUGO | S_IWUSR, b_show, b_store);
-
/* Tag each group of saved records with a sequence number */
static int oopscount;
+static char *reason_str[] = {
+ "Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency"
+};
+
/*
* callback from kmsg_dump. (s2,l2) has the most recently
* written bytes, older bytes are in (s1,l1). Save as much
@@ -71,15 +65,20 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
unsigned long size, total = 0;
- char *dst;
+ char *dst, *why;
u64 id;
int hsize, part = 1;
+ if (reason < ARRAY_SIZE(reason_str))
+ why = reason_str[reason];
+ else
+ why = "Unknown";
+
mutex_lock(&psinfo->buf_mutex);
oopscount++;
while (total < kmsg_bytes) {
dst = psinfo->buf;
- hsize = sprintf(dst, "Oops#%d Part%d\n", oopscount, part++);
+ hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part++);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -96,7 +95,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
id = psinfo->write(PSTORE_TYPE_DMESG, hsize + l1_cpy + l2_cpy);
- if (pstore_is_mounted())
+ if (reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id,
psinfo->buf, hsize + l1_cpy + l2_cpy,
CURRENT_TIME, psinfo->erase);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index e63b417..2b06466 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations qnx4_aops = {
.readpage = qnx4_readpage,
.writepage = qnx4_writepage,
- .sync_page = block_sync_page,
.write_begin = qnx4_write_begin,
.write_end = generic_write_end,
.bmap = qnx4_bmap
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a2a622e..d3c032f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -76,7 +76,7 @@
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
-#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
+#include "../internal.h" /* ugh */
#include <asm/uaccess.h>
@@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire);
*/
int dquot_commit(struct dquot *dquot)
{
- int ret = 0, ret2 = 0;
+ int ret = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dqopt->dqio_mutex);
@@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot)
spin_unlock(&dq_list_lock);
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
- if (info_dirty(&dqopt->info[dquot->dq_type])) {
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
- dquot->dq_sb, dquot->dq_type);
- }
- if (ret >= 0)
- ret = ret2;
- }
+ else
+ ret = -EIO;
out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
@@ -900,33 +895,38 @@ static void add_dquot_ref(struct super_block *sb, int type)
int reserved = 0;
#endif
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ !atomic_read(&inode->i_writecount) ||
+ !dqinit_needed(inode, type)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
- if (!atomic_read(&inode->i_writecount))
- continue;
- if (!dqinit_needed(inode, type))
- continue;
-
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
__dquot_initialize(inode, type);
- /* We hold a reference to 'inode' so it couldn't have been
- * removed from s_inodes list while we dropped the inode_lock.
- * We cannot iput the inode now as we can be holding the last
- * reference and we cannot iput it under inode_lock. So we
- * keep the reference and iput it later. */
+
+ /*
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can be
+ * holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
+ */
old_inode = inode;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
#ifdef CONFIG_QUOTA_DEBUG
@@ -951,7 +951,7 @@ static inline int dqput_blocks(struct dquot *dquot)
/*
* Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last referece to dquot
+ * if we have the last reference to dquot
* We can't race with anybody because we hold dqptr_sem for writing...
*/
static int remove_inode_dquot_ref(struct inode *inode, int type,
@@ -1007,7 +1007,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
struct inode *inode;
int reserved = 0;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
/*
* We have to scan also I_NEW inodes because they can already
@@ -1021,7 +1021,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
remove_inode_dquot_ref(inode, type, tofree_head);
}
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened after quota"
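
The add_dquot_ref() rework above is an instance of the standard pattern for walking sb->s_inodes once the global inode_lock is split into inode_sb_list_lock plus a per-inode i_lock: check i_state under i_lock, pin the inode with __iget() before dropping both locks, and defer iput() of the previous inode until no spinlock is held. A condensed sketch of that pattern, using the kernel-internal APIs as of this series:

static void walk_sb_inodes(struct super_block *sb, void (*fn)(struct inode *))
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);			/* pin it; i_lock makes this safe */
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		iput(old_inode);		/* may sleep, so outside the locks */
		fn(inode);
		old_inode = inode;		/* our ref keeps the list walk valid */

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}
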
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 9eead2c..fbb0b47 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
SetPageDirty(page);
unlock_page(page);
+ put_page(page);
}
return 0;
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 792b3cb..3c3b001 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -31,9 +31,7 @@ endif
# and causing a panic. Since this behavior only affects ppc32, this ifeq
# will work around it. If any other architecture displays this behavior,
# add it here.
-ifeq ($(CONFIG_PPC32),y)
-EXTRA_CFLAGS := $(call cc-ifversion, -lt, 0400, -O1)
-endif
+ccflags-$(CONFIG_PPC32) := $(call cc-ifversion, -lt, 0400, -O1)
TAGS:
etags *.c
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1bba24b..4fd5bb3 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3217,7 +3217,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
.readpages = reiserfs_readpages,
.releasepage = reiserfs_releasepage,
.invalidatepage = reiserfs_invalidatepage,
- .sync_page = block_sync_page,
.write_begin = reiserfs_write_begin,
.write_end = reiserfs_write_end,
.bmap = reiserfs_aop_bmap,
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 79265fd..4e15305 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -59,7 +59,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
break;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EPERM;
goto setflags_out;
}
@@ -103,7 +103,7 @@ setflags_out:
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EPERM;
break;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index c77514b..c5e82ec 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,7 +1,7 @@
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
-** The background commits make this code very interelated, and
+** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
@@ -2725,7 +2725,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
REISERFS_DISK_OFFSET_IN_BYTES /
sb->s_blocksize + 2);
- /* Sanity check to see is the standard journal fitting withing first bitmap
+ /* Sanity check to see if the standard journal fits within the first bitmap
(actual for small blocksizes) */
if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
(SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index b87aa2c..7df1ce4 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -15,7 +15,7 @@
* for this mutex, no need for a system wide mutex facility.
*
* Also this lock is often released before a call that could block because
- * reiserfs performances were partialy based on the release while schedule()
+ * reiserfs performances were partially based on the release while schedule()
* property of the Bkl.
*/
void reiserfs_write_lock(struct super_block *s)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 0aab04f..b216ff6 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -393,7 +393,7 @@ void add_save_link(struct reiserfs_transaction_handle *th,
/* body of "save" link */
link = INODE_PKEY(inode)->k_dir_id;
- /* put "save" link inot tree, don't charge quota to anyone */
+ /* put "save" link into tree, don't charge quota to anyone */
retval =
reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
if (retval) {
@@ -2104,7 +2104,7 @@ out:
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5c11ca8..47d2a44 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -396,7 +396,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
struct address_space *mapping = dir->i_mapping;
struct page *page;
/* We can deadlock if we try to free dentries,
- and an unlink/rmdir has just occured - GFP_NOFS avoids this */
+ and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
mapping_set_gfp_mask(mapping, GFP_NOFS);
page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
if (!IS_ERR(page)) {
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 90d2fcb..3dc38f1 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -26,7 +26,7 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
size_t jcreate_blocks;
if (!reiserfs_posixacl(inode->i_sb))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/select.c b/fs/select.c
index e56560d..d33418f 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -517,9 +517,6 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
* Update: ERESTARTSYS breaks at least the xview clock binary, so
* I'm trying ERESTARTNOHAND which restart only when you want to.
*/
-#define MAX_SELECT_SECONDS \
- ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec *end_time)
{
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index aa68a8a..efc309f 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,12 +5,12 @@ config SQUASHFS
help
Saying Y here includes support for SquashFS 4.0 (a Compressed
Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib/lzo compression to compress both
- files, inodes and directories. Inodes in the system are very small
- and all blocks are packed to minimise data overhead. Block sizes
- greater than 4K are supported up to a maximum of 1 Mbytes (default
- block size 128K). SquashFS 4.0 supports 64 bit filesystems and files
- (larger than 4GB), full uid/gid information, hard links and
+ filesystem for Linux. It uses zlib, lzo or xz compression to
+ compress files, inodes and directories. Inodes in the system
+ are very small and all blocks are packed to minimise data overhead.
+ Block sizes greater than 4K are supported, up to a maximum of 1 Mbyte
+ (default block size 128K). SquashFS 4.0 supports 64 bit filesystems
+ and files (larger than 4GB), full uid/gid information, hard links and
timestamps.
Squashfs is intended for general read-only filesystem use, for
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 26b15ae..c37b520 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -104,7 +104,7 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
entry = &cache->entry[i];
/*
- * Initialise choosen cache entry, and fill it in from
+ * Initialise chosen cache entry, and fill it in from
* disk.
*/
cache->unused--;
@@ -286,7 +286,7 @@ cleanup:
/*
- * Copy upto length bytes from cache entry to buffer starting at offset bytes
+ * Copy up to length bytes from cache entry to buffer starting at offset bytes
* into the cache entry. If there's not length bytes then copy the number of
* bytes available. In all cases return the number of bytes copied.
*/
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index a5940e5..e921bd2 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "squashfs_fs.h"
@@ -74,3 +75,36 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
return decompressor[i];
}
+
+
+void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *strm, *buffer = NULL;
+ int length = 0;
+
+ /*
+ * Read decompressor-specific options from the file system, if present
+ */
+ if (SQUASHFS_COMP_OPTS(flags)) {
+ buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ length = squashfs_read_data(sb, &buffer,
+ sizeof(struct squashfs_super_block), 0, NULL,
+ PAGE_CACHE_SIZE, 1);
+
+ if (length < 0) {
+ strm = ERR_PTR(length);
+ goto finished;
+ }
+ }
+
+ strm = msblk->decompressor->init(msblk, buffer, length);
+
+finished:
+ kfree(buffer);
+
+ return strm;
+}
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 3b305a7..099745a 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,7 +24,7 @@
*/
struct squashfs_decompressor {
- void *(*init)(struct squashfs_sb_info *);
+ void *(*init)(struct squashfs_sb_info *, void *, int);
void (*free)(void *);
int (*decompress)(struct squashfs_sb_info *, void **,
struct buffer_head **, int, int, int, int, int);
@@ -33,11 +33,6 @@ struct squashfs_decompressor {
int supported;
};
-static inline void *squashfs_decompressor_init(struct squashfs_sb_info *msblk)
-{
- return msblk->decompressor->init(msblk);
-}
-
static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
void *s)
{
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 0dc340a..3f79cd1 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
+
+ /* dir_count should never be larger than 256 */
+ if (dir_count > 256)
+ goto failed_read;
+
while (dir_count--) {
/*
* Read directory entry.
@@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
size = le16_to_cpu(dire->size) + 1;
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto failed_read;
+
err = squashfs_read_metadata(inode->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 7da759e..00f4dfc 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -37,7 +37,7 @@ struct squashfs_lzo {
void *output;
};
-static void *lzo_init(struct squashfs_sb_info *msblk)
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
@@ -58,7 +58,7 @@ failed2:
failed:
ERROR("Failed to allocate lzo workspace\n");
kfree(stream);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 7a9464d..5d922a6 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
+
+ /* dir_count should never be larger than 256 */
+ if (dir_count > 256)
+ goto data_error;
+
while (dir_count--) {
/*
* Read directory entry.
@@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
size = le16_to_cpu(dire->size) + 1;
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto data_error;
+
err = squashfs_read_metadata(dir->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
@@ -228,6 +237,9 @@ exit_lookup:
d_add(dentry, inode);
return ERR_PTR(0);
+data_error:
+ err = -EIO;
+
read_failure:
ERROR("Unable to read directory block [%llx:%x]\n",
squashfs_i(dir)->start + msblk->directory_table,
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index ba729d8..1f2e608 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -48,6 +48,7 @@ extern int squashfs_read_table(struct super_block *, void *, u64, int);
/* decompressor.c */
extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
+extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
/* export.c */
extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 39533fe..4582c56 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -57,6 +57,7 @@
#define SQUASHFS_ALWAYS_FRAG 5
#define SQUASHFS_DUPLICATE 6
#define SQUASHFS_EXPORT 7
+#define SQUASHFS_COMP_OPT 10
#define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1)
@@ -81,6 +82,9 @@
#define SQUASHFS_EXPORTABLE(flags) SQUASHFS_BIT(flags, \
SQUASHFS_EXPORT)
+#define SQUASHFS_COMP_OPTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_COMP_OPT)
+
/* Max number of types and file types */
#define SQUASHFS_DIR_TYPE 1
#define SQUASHFS_REG_TYPE 2
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 20700b9..5c8184c 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -199,10 +199,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
- msblk->stream = squashfs_decompressor_init(msblk);
- if (msblk->stream == NULL)
- goto failed_mount;
-
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
if (msblk->block_cache == NULL)
@@ -215,6 +211,13 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ msblk->stream = squashfs_decompressor_init(sb, flags);
+ if (IS_ERR(msblk->stream)) {
+ err = PTR_ERR(msblk->stream);
+ msblk->stream = NULL;
+ goto failed_mount;
+ }
+
/* Allocate and read id index table */
msblk->id_table = squashfs_read_id_index_table(sb,
le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
@@ -370,8 +373,8 @@ static void squashfs_put_super(struct super_block *sb)
}
-static struct dentry *squashfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static struct dentry *squashfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, squashfs_fill_super);
}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c4eb400..aa47a28 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -26,10 +26,10 @@
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/xz.h>
+#include <linux/bitops.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "decompressor.h"
@@ -38,24 +38,57 @@ struct squashfs_xz {
struct xz_buf buf;
};
-static void *squashfs_xz_init(struct squashfs_sb_info *msblk)
+struct comp_opts {
+ __le32 dictionary_size;
+ __le32 flags;
+};
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
+ int len)
{
- int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+ struct comp_opts *comp_opts = buff;
+ struct squashfs_xz *stream;
+ int dict_size = msblk->block_size;
+ int err, n;
+
+ if (comp_opts) {
+ /* check compressor options are the expected length */
+ if (len < sizeof(*comp_opts)) {
+ err = -EIO;
+ goto failed;
+ }
- struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
- if (stream == NULL)
+ dict_size = le32_to_cpu(comp_opts->dictionary_size);
+
+ /* the dictionary size should be 2^n or 2^n+2^(n+1) */
+ n = ffs(dict_size) - 1;
+ if (dict_size != (1 << n) && dict_size != (1 << n) +
+ (1 << (n + 1))) {
+ err = -EIO;
+ goto failed;
+ }
+ }
+
+ dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL) {
+ err = -ENOMEM;
goto failed;
+ }
- stream->state = xz_dec_init(XZ_PREALLOC, block_size);
- if (stream->state == NULL)
+ stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
+ if (stream->state == NULL) {
+ kfree(stream);
+ err = -ENOMEM;
goto failed;
+ }
return stream;
failed:
- ERROR("Failed to allocate xz workspace\n");
- kfree(stream);
- return NULL;
+ ERROR("Failed to initialise xz decompressor\n");
+ return ERR_PTR(err);
}
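
The validation added above accepts only dictionary sizes of the binary form 10...0 (a power of two, 2^n) or 110...0 (2^n + 2^(n+1), i.e. 3 * 2^n), the two shapes the xz format permits. A standalone userspace sketch of the same check:

#include <stdbool.h>
#include <strings.h>	/* ffs() */

static bool xz_dict_size_valid(int dict_size)
{
	int n;

	if (dict_size <= 0)
		return false;
	n = ffs(dict_size) - 1;		/* index of the lowest set bit */
	return dict_size == (1 << n) ||
	       dict_size == (1 << n) + (1 << (n + 1));
}
/* xz_dict_size_valid(8192) and xz_dict_size_valid(12288) hold; 10240 does not */
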
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 4661ae2..517688b3 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -26,19 +26,19 @@
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/zlib.h>
+#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
-static void *zlib_init(struct squashfs_sb_info *dummy)
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
- stream->workspace = kmalloc(zlib_inflate_workspacesize(),
- GFP_KERNEL);
+ stream->workspace = vmalloc(zlib_inflate_workspacesize());
if (stream->workspace == NULL)
goto failed;
@@ -47,7 +47,7 @@ static void *zlib_init(struct squashfs_sb_info *dummy)
failed:
ERROR("Failed to allocate zlib workspace\n");
kfree(stream);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
@@ -56,7 +56,7 @@ static void zlib_free(void *strm)
z_stream *stream = strm;
if (stream)
- kfree(stream->workspace);
+ vfree(stream->workspace);
kfree(stream);
}
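
The kmalloc()-to-vmalloc() switch above, paired with the matching vfree() in zlib_free(), is motivated by allocation size: zlib_inflate_workspacesize() is tens of kilobytes, large enough that a physically contiguous kmalloc() can fail on a memory-fragmented system, while vmalloc() only needs virtually contiguous pages. A minimal sketch of the pairing:

#include <linux/vmalloc.h>
#include <linux/zlib.h>

static void *inflate_workspace_alloc(void)
{
	/* typically well over 32KB; no need for physical contiguity here */
	return vmalloc(zlib_inflate_workspacesize());
}

static void inflate_workspace_free(void *ws)
{
	vfree(ws);	/* vmalloc() memory must never be kfree()d */
}
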
diff --git a/fs/super.c b/fs/super.c
index e848649..8a06881 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
#else
INIT_LIST_HEAD(&s->s_files);
#endif
+ s->s_bdi = &default_backing_dev_info;
INIT_LIST_HEAD(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
@@ -936,6 +937,7 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
+ WARN_ON(sb->s_bdi == &default_backing_dev_info);
sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(sb, flags, secdata);
diff --git a/fs/sync.c b/fs/sync.c
index ba76b96..c38ec16 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
@@ -33,7 +34,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
* This should be safe, as we require bdi backing to actually
* write out data in the first place
*/
- if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
+ if (sb->s_bdi == &noop_backing_dev_info)
return 0;
if (sb->s_qcop && sb->s_qcop->quota_sync)
@@ -79,7 +80,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem);
static void sync_one_sb(struct super_block *sb, void *arg)
{
- if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
+ if (!(sb->s_flags & MS_RDONLY))
__sync_filesystem(sb, *(int *)arg);
}
/*
@@ -128,6 +129,29 @@ void emergency_sync(void)
}
}
+/*
+ * sync a single super
+ */
+SYSCALL_DEFINE1(syncfs, int, fd)
+{
+ struct file *file;
+ struct super_block *sb;
+ int ret;
+ int fput_needed;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+ sb = file->f_dentry->d_sb;
+
+ down_read(&sb->s_umount);
+ ret = sync_filesystem(sb);
+ up_read(&sb->s_umount);
+
+ fput_light(file, fput_needed);
+ return ret;
+}
+
/**
* vfs_fsync_range - helper to sync a range of data & metadata to disk
* @file: file to sync
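
The new syscall above syncs only the filesystem backing a given fd, rather than every mounted filesystem as sync(2) does. A hypothetical userspace sketch, invoking it via syscall(2) since no libc wrapper existed yet; __NR_syncfs assumes a kernel carrying this patch and the matching syscall table entries:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/data", O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* flushes only the filesystem that /mnt/data lives on */
	if (syscall(__NR_syncfs, fd) < 0)
		perror("syncfs");
	close(fd);
	return 0;
}
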
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 9ca6627..fa8d43c 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations sysv_aops = {
.readpage = sysv_readpage,
.writepage = sysv_writepage,
- .sync_page = block_sync_page,
.write_begin = sysv_write_begin,
.write_end = generic_write_end,
.bmap = sysv_bmap
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 1d1859d..f8b0160 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -47,7 +47,7 @@ config UBIFS_FS_DEBUG
bool "Enable debugging support"
depends on UBIFS_FS
select DEBUG_FS
- select KALLSYMS_ALL
+ select KALLSYMS
help
This option enables UBIFS debugging support. It makes sure various
assertions, self-checks, debugging messages and test modes are compiled
@@ -58,12 +58,3 @@ config UBIFS_FS_DEBUG
down UBIFS. You can then further enable / disable individual debugging
features using UBIFS module parameters and the corresponding sysfs
interfaces.
-
-config UBIFS_FS_DEBUG_CHKS
- bool "Enable extra checks"
- depends on UBIFS_FS_DEBUG
- help
- If extra checks are enabled UBIFS will check the consistency of its
- internal data structures during operation. However, UBIFS performance
- is dramatically slower when this option is selected especially if the
- file system is large.
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c8ff0d1..8b3a7da 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -147,7 +147,7 @@ static int make_free_space(struct ubifs_info *c)
if (liab2 < liab1)
return -EAGAIN;
- dbg_budg("new liability %lld (not shrinked)", liab2);
+ dbg_budg("new liability %lld (not shrunk)", liab2);
/* Liability did not shrink again, try GC */
dbg_budg("Run GC");
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index b148fbc..1bd01de 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -577,7 +577,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
size_t sz;
if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
- goto out;
+ return 0;
INIT_LIST_HEAD(&list);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 01c2b02..004d374 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -818,7 +818,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
current->pid, lnum);
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
return;
@@ -972,11 +972,39 @@ void dbg_dump_index(struct ubifs_info *c)
void dbg_save_space_info(struct ubifs_info *c)
{
struct ubifs_debug_info *d = c->dbg;
-
- ubifs_get_lp_stats(c, &d->saved_lst);
+ int freeable_cnt;
spin_lock(&c->space_lock);
+ memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
+
+ /*
+ * We use a dirty hack here and zero out @c->freeable_cnt, because it
+ * affects the free space calculations, and UBIFS might not know about
+ * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
+ * only when we read their lprops, and we do this only lazily, on
+ * demand. So at any given point in time @c->freeable_cnt might not
+ * be exactly accurate.
+ *
+ * Just one example of the issue we hit when we did not zero
+ * @c->freeable_cnt.
+ * 1. The file-system is mounted R/O, @c->freeable_cnt is %0. We save the
+ * amount of free space in @d->saved_free
+ * 2. We re-mount R/W, which makes UBIFS read the "lsave"
+ * information from flash, where we cache LEBs from various
+ * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
+ * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
+ * -> 'ubifs_get_pnode()' -> 'update_cats()'
+ * -> 'ubifs_add_to_cat()').
+ * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
+ * becomes %1.
+ * 4. We calculate the amount of free space when the re-mount is
+ * finished in 'dbg_check_space_info()' and it does not match
+ * @d->saved_free.
+ */
+ freeable_cnt = c->freeable_cnt;
+ c->freeable_cnt = 0;
d->saved_free = ubifs_get_free_space_nolock(c);
+ c->freeable_cnt = freeable_cnt;
spin_unlock(&c->space_lock);
}
@@ -993,12 +1021,15 @@ int dbg_check_space_info(struct ubifs_info *c)
{
struct ubifs_debug_info *d = c->dbg;
struct ubifs_lp_stats lst;
- long long avail, free;
+ long long free;
+ int freeable_cnt;
spin_lock(&c->space_lock);
- avail = ubifs_calc_available(c, c->min_idx_lebs);
+ freeable_cnt = c->freeable_cnt;
+ c->freeable_cnt = 0;
+ free = ubifs_get_free_space_nolock(c);
+ c->freeable_cnt = freeable_cnt;
spin_unlock(&c->space_lock);
- free = ubifs_get_free_space(c);
if (free != d->saved_free) {
ubifs_err("free space changed from %lld to %lld",
@@ -2806,40 +2837,38 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
struct ubifs_debug_info *d = c->dbg;
sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- if (IS_ERR(d->dfs_dir)) {
- err = PTR_ERR(d->dfs_dir);
- ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- d->dfs_dir_name, err);
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
goto out;
- }
+ d->dfs_dir = dent;
fname = "dump_lprops";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_lprops = dent;
fname = "dump_budg";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_budg = dent;
fname = "dump_tnc";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_tnc = dent;
return 0;
out_remove:
- err = PTR_ERR(dent);
- ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- fname, err);
debugfs_remove_recursive(d->dfs_dir);
out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
+ fname, err);
return err;
}
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 919f0de..e6493ca 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -23,6 +23,12 @@
#ifndef __UBIFS_DEBUG_H__
#define __UBIFS_DEBUG_H__
+/* Checking helper functions */
+typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr, void *priv);
+typedef int (*dbg_znode_callback)(struct ubifs_info *c,
+ struct ubifs_znode *znode, void *priv);
+
#ifdef CONFIG_UBIFS_FS_DEBUG
/**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
void dbg_dump_index(struct ubifs_info *c);
void dbg_dump_lpt_lebs(const struct ubifs_info *c);
-/* Checking helper functions */
-typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
- struct ubifs_zbranch *zbr, void *priv);
-typedef int (*dbg_znode_callback)(struct ubifs_info *c,
- struct ubifs_znode *znode, void *priv);
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
dbg_znode_callback znode_cb, void *priv);
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
int dbg_check_filesystem(struct ubifs_info *c);
void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
int add_pos);
-int dbg_check_lprops(struct ubifs_info *c);
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
int row, int col);
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define DBGKEY(key) ((char *)(key))
#define DBGKEY1(key) ((char *)(key))
-#define ubifs_debugging_init(c) 0
-#define ubifs_debugging_exit(c) ({})
-
-#define dbg_ntype(type) ""
-#define dbg_cstate(cmt_state) ""
-#define dbg_jhead(jhead) ""
-#define dbg_get_key_dump(c, key) ({})
-#define dbg_dump_inode(c, inode) ({})
-#define dbg_dump_node(c, node) ({})
-#define dbg_dump_lpt_node(c, node, lnum, offs) ({})
-#define dbg_dump_budget_req(req) ({})
-#define dbg_dump_lstats(lst) ({})
-#define dbg_dump_budg(c) ({})
-#define dbg_dump_lprop(c, lp) ({})
-#define dbg_dump_lprops(c) ({})
-#define dbg_dump_lpt_info(c) ({})
-#define dbg_dump_leb(c, lnum) ({})
-#define dbg_dump_znode(c, znode) ({})
-#define dbg_dump_heap(c, heap, cat) ({})
-#define dbg_dump_pnode(c, pnode, parent, iip) ({})
-#define dbg_dump_tnc(c) ({})
-#define dbg_dump_index(c) ({})
-#define dbg_dump_lpt_lebs(c) ({})
-
-#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
-#define dbg_old_index_check_init(c, zroot) 0
-#define dbg_save_space_info(c) ({})
-#define dbg_check_space_info(c) 0
-#define dbg_check_old_index(c, zroot) 0
-#define dbg_check_cats(c) 0
-#define dbg_check_ltab(c) 0
-#define dbg_chk_lpt_free_spc(c) 0
-#define dbg_chk_lpt_sz(c, action, len) 0
-#define dbg_check_synced_i_size(inode) 0
-#define dbg_check_dir_size(c, dir) 0
-#define dbg_check_tnc(c, x) 0
-#define dbg_check_idx_size(c, idx_size) 0
-#define dbg_check_filesystem(c) 0
-#define dbg_check_heap(c, heap, cat, add_pos) ({})
-#define dbg_check_lprops(c) 0
-#define dbg_check_lpt_nodes(c, cnode, row, col) 0
-#define dbg_check_inode_size(c, inode, size) 0
-#define dbg_check_data_nodes_order(c, head) 0
-#define dbg_check_nondata_nodes_order(c, head) 0
-#define dbg_force_in_the_gaps_enabled 0
-#define dbg_force_in_the_gaps() 0
-#define dbg_failure_mode 0
-
-#define dbg_debugfs_init() 0
-#define dbg_debugfs_exit()
-#define dbg_debugfs_init_fs(c) 0
-#define dbg_debugfs_exit_fs(c) 0
+static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; }
+static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; }
+static inline const char *dbg_ntype(int type) { return ""; }
+static inline const char *dbg_cstate(int cmt_state) { return ""; }
+static inline const char *dbg_jhead(int jhead) { return ""; }
+static inline const char *
+dbg_get_key_dump(const struct ubifs_info *c,
+ const union ubifs_key *key) { return ""; }
+static inline void dbg_dump_inode(const struct ubifs_info *c,
+ const struct inode *inode) { return; }
+static inline void dbg_dump_node(const struct ubifs_info *c,
+ const void *node) { return; }
+static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
+ void *node, int lnum,
+ int offs) { return; }
+static inline void
+dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; }
+static inline void
+dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; }
+static inline void dbg_dump_budg(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lprop(const struct ubifs_info *c,
+ const struct ubifs_lprops *lp) { return; }
+static inline void dbg_dump_lprops(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; }
+static inline void dbg_dump_leb(const struct ubifs_info *c,
+ int lnum) { return; }
+static inline void
+dbg_dump_znode(const struct ubifs_info *c,
+ const struct ubifs_znode *znode) { return; }
+static inline void dbg_dump_heap(struct ubifs_info *c,
+ struct ubifs_lpt_heap *heap,
+ int cat) { return; }
+static inline void dbg_dump_pnode(struct ubifs_info *c,
+ struct ubifs_pnode *pnode,
+ struct ubifs_nnode *parent,
+ int iip) { return; }
+static inline void dbg_dump_tnc(struct ubifs_info *c) { return; }
+static inline void dbg_dump_index(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; }
+
+static inline int dbg_walk_index(struct ubifs_info *c,
+ dbg_leaf_callback leaf_cb,
+ dbg_znode_callback znode_cb,
+ void *priv) { return 0; }
+static inline void dbg_save_space_info(struct ubifs_info *c) { return; }
+static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; }
+static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; }
+static inline int
+dbg_old_index_check_init(struct ubifs_info *c,
+ struct ubifs_zbranch *zroot) { return 0; }
+static inline int
+dbg_check_old_index(struct ubifs_info *c,
+ struct ubifs_zbranch *zroot) { return 0; }
+static inline int dbg_check_cats(struct ubifs_info *c) { return 0; }
+static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; }
+static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; }
+static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
+ int action, int len) { return 0; }
+static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; }
+static inline int dbg_check_dir_size(struct ubifs_info *c,
+ const struct inode *dir) { return 0; }
+static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; }
+static inline int dbg_check_idx_size(struct ubifs_info *c,
+ long long idx_size) { return 0; }
+static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; }
+static inline void dbg_check_heap(struct ubifs_info *c,
+ struct ubifs_lpt_heap *heap,
+ int cat, int add_pos) { return; }
+static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
+ struct ubifs_cnode *cnode, int row, int col) { return 0; }
+static inline int dbg_check_inode_size(struct ubifs_info *c,
+ const struct inode *inode,
+ loff_t size) { return 0; }
+static inline int
+dbg_check_data_nodes_order(struct ubifs_info *c,
+ struct list_head *head) { return 0; }
+static inline int
+dbg_check_nondata_nodes_order(struct ubifs_info *c,
+ struct list_head *head) { return 0; }
+
+static inline int dbg_force_in_the_gaps(void) { return 0; }
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_failure_mode 0
+
+static inline int dbg_debugfs_init(void) { return 0; }
+static inline void dbg_debugfs_exit(void) { return; }
+static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; }
+static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; }
#endif /* !CONFIG_UBIFS_FS_DEBUG */
#endif /* !__UBIFS_DEBUG_H__ */
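
The wholesale macro-to-static-inline conversion above means the no-debug stubs now type-check their arguments in every configuration; the old `#define dbg_check_tnc(c, x) 0` style never parsed its arguments, so mistakes could hide until CONFIG_UBIFS_FS_DEBUG was enabled. A toy illustration of the difference (names hypothetical):

struct ctx;				/* stand-in for struct ubifs_info */

/* old style: arguments are swallowed unexamined, any type compiles */
#define dbg_check_macro(c, extra)	0

/* new style: passing the wrong pointer type fails in all configs */
static inline int dbg_check_inline(struct ctx *c, int extra)
{
	return 0;
}
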
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index d77db7e..b286db7 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -448,10 +448,12 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
/*
* We change whole page so no need to load it. But we
- * have to set the @PG_checked flag to make the further
- * code know that the page is new. This might be not
- * true, but it is better to budget more than to read
- * the page from the media.
+ * do not know whether this page exists on the media or
+ * not, so we assume the latter because it requires a
+ * larger budget. The assumption is that it is better
+ * to budget a bit more than to read the page from the
+ * media. Thus, we are setting the @PG_checked flag
+ * here.
*/
SetPageChecked(page);
skipped_read = 1;
@@ -559,6 +561,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
dbg_gen("copied %d instead of %d, read page and repeat",
copied, len);
cancel_budget(c, page, ui, appending);
+ ClearPageChecked(page);
/*
* Return 0 to force VFS to repeat the whole operation, or the
@@ -1309,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
dbg_gen("syncing inode %lu", inode->i_ino);
+ if (inode->i_sb->s_flags & MS_RDONLY)
+ return 0;
+
/*
* VFS has already synchronized dirty pages for this inode. Synchronize
* the inode unless this is a 'datasync()' call.
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 8aacd64..548acf4 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -160,7 +160,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_RDONLY(inode))
return -EROFS;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index c7b25e2..0ee0847 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1094,7 +1094,7 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to scan LEB %d", lnum);
goto out;
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 72775d3..ef5155e 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
lnum = branch->lnum;
offs = branch->offs;
pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
- if (!pnode) {
- err = -ENOMEM;
- goto out;
- }
+ if (!pnode)
+ return -ENOMEM;
+
if (lnum == 0) {
/*
* This pnode was not written which just means that the LEB
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 0a3c2c3..0c9c69b 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -1633,7 +1633,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
return 0;
- buf = p = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory for ltab checking");
return 0;
@@ -1885,7 +1885,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
current->pid, lnum);
- buf = p = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to dump LPT");
return;
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 2cdbd31..09df318 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -898,7 +898,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
if (c->no_orphs)
return 0;
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to check orphans");
return 0;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 936f2cb..3dbad6f 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -317,6 +317,32 @@ int ubifs_recover_master_node(struct ubifs_info *c)
goto out_free;
}
memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
+
+ /*
+ * We had to recover the master node, which means there was an
+ * unclean reboot. However, it is possible that the master node
+ * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
+ * E.g., consider the following chain of events:
+ *
+ * 1. UBIFS was cleanly unmounted, so the master node is clean
+ * 2. UBIFS is being mounted R/W and starts changing the master
+ * node in the first LEB (%UBIFS_MST_LNUM). A power cut happens,
+ * so this LEB ends up with some amount of garbage at the
+ * end.
+ * 3. UBIFS is being mounted R/O. We reach this place and
+ * recover the master node from the second LEB
+ * (%UBIFS_MST_LNUM + 1). But we cannot update the media
+ * because we are being mounted R/O. We have to defer the
+ * operation.
+ * 4. However, this master node (@c->mst_node) is marked as
+ * clean (since step 1). And if we just return, the
+ * mount code will be confused and won't recover the master
+ * node when it is re-mounted R/W later.
+ *
+ * Thus, we force the recovery by marking the master node as
+ * dirty.
+ */
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
} else {
/* Write the recovered master node */
c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index e5dc1e1..be6c7b0 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1568,6 +1568,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
mutex_lock(&c->umount_mutex);
dbg_save_space_info(c);
c->remounting_rw = 1;
+ c->ro_mount = 0;
err = check_free_space(c);
if (err)
@@ -1670,19 +1671,30 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (err)
goto out;
+ dbg_gen("re-mounted read-write");
+ c->remounting_rw = 0;
+
if (c->need_recovery) {
c->need_recovery = 0;
ubifs_msg("deferred recovery completed");
+ } else {
+ /*
+ * Do not run the debugging space check if we were doing
+ * recovery, because when we saved the information we had the
+ * file-system in a state where the TNC and lprops have been
+ * modified in memory, but all the I/O operations (including a
+ * commit) were deferred. So the file-system was in
+ * "non-committed" state. Now the file-system is in committed
+ * state, and of course the amount of free space will change
+ * because, for example, the old index size was imprecise.
+ */
+ err = dbg_check_space_info(c);
}
-
- dbg_gen("re-mounted read-write");
- c->ro_mount = 0;
- c->remounting_rw = 0;
- err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
return err;
out:
+ c->ro_mount = 1;
vfree(c->orph_buf);
c->orph_buf = NULL;
if (c->bgt) {
@@ -1760,10 +1772,12 @@ static void ubifs_put_super(struct super_block *sb)
* of the media. For example, there will be dirty inodes if we failed
* to write them back because of I/O errors.
*/
- ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
- ubifs_assert(c->budg_idx_growth == 0);
- ubifs_assert(c->budg_dd_growth == 0);
- ubifs_assert(c->budg_data_growth == 0);
+ if (!c->ro_error) {
+ ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
+ ubifs_assert(c->budg_idx_growth == 0);
+ ubifs_assert(c->budg_dd_growth == 0);
+ ubifs_assert(c->budg_data_growth == 0);
+ }
/*
* The 'c->umount_lock' prevents races between UBIFS memory shrinker
@@ -2011,7 +2025,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
*/
c->bdi.name = "ubifs",
c->bdi.capabilities = BDI_CAP_MAP_COPY;
- c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index c74400f..3299f46 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -56,6 +56,7 @@
*/
#include "ubifs.h"
+#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
@@ -80,7 +81,6 @@ enum {
};
static const struct inode_operations none_inode_operations;
-static const struct address_space_operations none_address_operations;
static const struct file_operations none_file_operations;
/**
@@ -130,7 +130,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
}
/* Re-define all operations to be "nothing" */
- inode->i_mapping->a_ops = &none_address_operations;
+ inode->i_mapping->a_ops = &empty_aops;
inode->i_op = &none_inode_operations;
inode->i_fop = &none_file_operations;
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 8994dd0..95518a9 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -27,11 +27,10 @@
#include "udf_i.h"
#include "udf_sb.h"
-#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
-#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
-#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
-#define udf_find_next_one_bit(addr, size, offset) \
- ext2_find_next_bit((unsigned long *)(addr), size, offset)
+#define udf_clear_bit __test_and_clear_bit_le
+#define udf_set_bit __test_and_set_bit_le
+#define udf_test_bit test_bit_le
+#define udf_find_next_one_bit find_next_bit_le
static int read_block_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap, unsigned int block,
diff --git a/fs/udf/file.c b/fs/udf/file.c
index f391a2a..2a346bb 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
- .sync_page = block_sync_page,
.write_begin = simple_write_begin,
.write_end = udf_adinicb_write_end,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ccc8143..1d1358e 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -140,7 +140,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.writepage = udf_writepage,
- .sync_page = block_sync_page,
.write_begin = udf_write_begin,
.write_end = generic_write_end,
.bmap = udf_bmap,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 6863599..b4d791a 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -78,7 +78,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
/*
* Returns the location of the fragment from
- * the begining of the filesystem.
+ * the beginning of the filesystem.
*/
static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
@@ -552,7 +552,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations ufs_aops = {
.readpage = ufs_readpage,
.writepage = ufs_writepage,
- .sync_page = block_sync_page,
.write_begin = ufs_write_begin,
.write_end = generic_write_end,
.bmap = ufs_bmap
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 7693d62..3915ade 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -483,9 +483,9 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
}
/*
- * Diffrent types of UFS hold fs_cstotal in different
- * places, and use diffrent data structure for it.
- * To make things simplier we just copy fs_cstotal to ufs_sb_private_info
+ * Different types of UFS hold fs_cstotal in different
+ * places, and use different data structure for it.
+ * To make things simpler we just copy fs_cstotal to ufs_sb_private_info
*/
static void ufs_setup_cstotal(struct super_block *sb)
{
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e56a4f5..5f821db 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -479,7 +479,6 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
break;
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
- blk_run_address_space(inode->i_mapping);
yield();
}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 9f8775c..9541759 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -408,7 +408,7 @@ static inline unsigned _ubh_find_next_zero_bit_(
for (;;) {
count = min_t(unsigned int, size + offset, uspi->s_bpf);
size -= count - offset;
- pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
+ pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
if (pos < count || !size)
break;
base++;
diff --git a/fs/utimes.c b/fs/utimes.c
index 179b586..ba653f3 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -95,7 +95,7 @@ static int utimes_common(struct path *path, struct timespec *times)
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
error = inode_permission(inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
diff --git a/fs/xattr.c b/fs/xattr.c
index 01bb813..f1ef949 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -59,7 +59,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -EPERM;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (mask & MAY_WRITE) && !is_owner_or_cap(inode))
+ (mask & MAY_WRITE) && !inode_owner_or_capable(inode))
return -EPERM;
}
@@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(dentry, name, value, size, 0, handler->flags);
+ return handler->set(dentry, name, value, size, flags, handler->flags);
}
/*
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index faca449..284a7c8 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -16,14 +16,11 @@
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
-EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6
+ccflags-y := -I$(src) -I$(src)/linux-2.6
+ccflags-$(CONFIG_XFS_DEBUG) += -g
XFS_LINUX := linux-2.6
-ifeq ($(CONFIG_XFS_DEBUG),y)
- EXTRA_CFLAGS += -g
-endif
-
obj-$(CONFIG_XFS_FS) += xfs.o
xfs-y += linux-2.6/xfs_trace.o
@@ -105,11 +102,10 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
xfs_globals.o \
xfs_ioctl.o \
xfs_iops.o \
+ xfs_message.o \
xfs_super.o \
xfs_sync.o \
xfs_xattr.o)
# Objects in support/
-xfs-y += $(addprefix support/, \
- debug.o \
- uuid.o)
+xfs-y += support/uuid.o
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 666c9db..a907de5 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -23,6 +23,7 @@
#include <linux/backing-dev.h>
#include "time.h"
#include "kmem.h"
+#include "xfs_message.h"
/*
* Greedy allocation. May fail and may return vmalloced memory.
@@ -56,8 +57,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
- printk(KERN_ERR "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
__func__, lflags);
congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
@@ -112,8 +113,8 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
- printk(KERN_ERR "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
__func__, lflags);
congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index ec7bbb5..79ce38b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
if (xfs_ioend_new_eof(ioend))
xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE, bio);
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
@@ -854,7 +853,7 @@ xfs_aops_discard_page(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
goto out_invalidate;
- xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ xfs_alert(ip->i_mount,
"page discard on page %p, inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
@@ -872,7 +871,7 @@ xfs_aops_discard_page(
if (error) {
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ xfs_alert(ip->i_mount,
"page discard unable to remove delalloc mapping.");
}
break;
@@ -1296,7 +1295,7 @@ xfs_get_blocks_direct(
* If the private argument is non-NULL __xfs_get_blocks signals us that we
* need to issue a transaction to convert the range from unwritten to written
* extents. In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done. But in case this was a successfull AIO
+ * to do this and we are done. But in case this was a successful AIO
* request this handler is called from interrupt context, from which we
* can't start transactions. In that case offload the I/O completion to
* the workqueues we also use for buffered I/O completion.
@@ -1411,7 +1410,7 @@ xfs_vm_write_failed(
if (error) {
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ xfs_alert(ip->i_mount,
"xfs_vm_write_failed: unable to clean up ino %lld",
ip->i_ino);
}
@@ -1495,7 +1494,6 @@ const struct address_space_operations xfs_address_space_operations = {
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.write_begin = xfs_vm_write_begin,
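
The comment corrected above explains why unwritten-extent conversion cannot run directly from the AIO completion path: interrupt context cannot start transactions, so the work is punted to a workqueue. A hedged sketch of that offload shape, with my_ioend and both functions as hypothetical stand-ins:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_ioend {
        struct work_struct      work;
        /* ... per-I/O completion state ... */
};

static void my_ioend_work(struct work_struct *work)
{
        struct my_ioend *ioend = container_of(work, struct my_ioend, work);

        /* process context: safe to sleep and start transactions here */
        kfree(ioend);
}

/* called from interrupt context: queue the heavy lifting and return */
static void my_end_io(struct my_ioend *ioend)
{
        INIT_WORK(&ioend->work, my_ioend_work);
        schedule_work(&ioend->work);
}
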
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index f83a4c8..9ef9ed2 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -94,75 +94,6 @@ xfs_buf_vmap_len(
}
/*
- * Page Region interfaces.
- *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
- *
- * Each such region is "bytes per page / bits per long" bytes long.
- *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
- */
-#if (BITS_PER_LONG == 32)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
-#elif (BITS_PER_LONG == 64)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
-#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
-#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
-
-STATIC unsigned long
-page_region_mask(
- size_t offset,
- size_t length)
-{
- unsigned long mask;
- int first, final;
-
- first = BTOPR(offset);
- final = BTOPRT(offset + length - 1);
- first = min(first, final);
-
- mask = ~0UL;
- mask <<= BITS_PER_LONG - (final - first);
- mask >>= BITS_PER_LONG - (final);
-
- ASSERT(offset + length <= PAGE_CACHE_SIZE);
- ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
-
- return mask;
-}
-
-STATIC void
-set_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- set_page_private(page,
- page_private(page) | page_region_mask(offset, length));
- if (page_private(page) == ~0UL)
- SetPageUptodate(page);
-}
-
-STATIC int
-test_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- unsigned long mask = page_region_mask(offset, length);
-
- return (mask && (page_private(page) & mask) == mask);
-}
-
-/*
* xfs_buf_lru_add - add a buffer to the LRU.
*
* The LRU takes a new reference to the buffer so that it will only be freed
@@ -189,7 +120,7 @@ xfs_buf_lru_add(
* The unlocked check is safe here because it only occurs when there are not
* b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
* to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unneccessary round trip on the
+ * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
* bt_lru_lock.
*/
STATIC void
@@ -332,7 +263,7 @@ xfs_buf_free(
ASSERT(list_empty(&bp->b_lru));
- if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
+ if (bp->b_flags & _XBF_PAGES) {
uint i;
if (xfs_buf_is_vmapped(bp))
@@ -342,56 +273,77 @@ xfs_buf_free(
for (i = 0; i < bp->b_page_count; i++) {
struct page *page = bp->b_pages[i];
- if (bp->b_flags & _XBF_PAGE_CACHE)
- ASSERT(!PagePrivate(page));
- page_cache_release(page);
+ __free_page(page);
}
- }
+ } else if (bp->b_flags & _XBF_KMEM)
+ kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
xfs_buf_deallocate(bp);
}
/*
- * Finds all pages for buffer in question and builds it's page list.
+ * Allocates all the pages for the buffer in question and builds its page list.
*/
STATIC int
-_xfs_buf_lookup_pages(
+xfs_buf_allocate_memory(
xfs_buf_t *bp,
uint flags)
{
- struct address_space *mapping = bp->b_target->bt_mapping;
- size_t blocksize = bp->b_target->bt_bsize;
size_t size = bp->b_count_desired;
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
- pgoff_t first;
xfs_off_t end;
int error;
+ /*
+ * for buffers that are contained within a single page, just allocate
+ * the memory from the heap - there's no need for the complexity of
+ * page arrays to keep allocation down to order 0.
+ */
+ if (bp->b_buffer_length < PAGE_SIZE) {
+ bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
+ if (!bp->b_addr) {
+ /* low memory - use alloc_page loop instead */
+ goto use_alloc_page;
+ }
+
+ if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
+ PAGE_MASK) !=
+ ((unsigned long)bp->b_addr & PAGE_MASK)) {
+ /* b_addr spans two pages - use alloc_page instead */
+ kmem_free(bp->b_addr);
+ bp->b_addr = NULL;
+ goto use_alloc_page;
+ }
+ bp->b_offset = offset_in_page(bp->b_addr);
+ bp->b_pages = bp->b_page_array;
+ bp->b_pages[0] = virt_to_page(bp->b_addr);
+ bp->b_page_count = 1;
+ bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+ return 0;
+ }
+
+use_alloc_page:
end = bp->b_file_offset + bp->b_buffer_length;
page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
-
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
- bp->b_flags |= _XBF_PAGE_CACHE;
offset = bp->b_offset;
- first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
+ bp->b_flags |= _XBF_PAGES;
for (i = 0; i < bp->b_page_count; i++) {
struct page *page;
uint retries = 0;
-
- retry:
- page = find_or_create_page(mapping, first + i, gfp_mask);
+retry:
+ page = alloc_page(gfp_mask);
if (unlikely(page == NULL)) {
if (flags & XBF_READ_AHEAD) {
bp->b_page_count = i;
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- return -ENOMEM;
+ error = ENOMEM;
+ goto out_free_pages;
}
/*
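
The single-page fast path added in the hunk above only keeps the heap allocation when it sits entirely within one page; otherwise b_addr could not be mapped back to a single struct page. The boundary test reduces to a masked-address comparison, sketched here with a hypothetical helper:

#include <linux/mm.h>
#include <linux/types.h>

/* true if [addr, addr + len) crosses a page boundary */
static bool spans_two_pages(void *addr, size_t len)
{
        unsigned long first = (unsigned long)addr & PAGE_MASK;
        unsigned long last = ((unsigned long)addr + len - 1) & PAGE_MASK;

        return first != last;
}
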
@@ -401,9 +353,8 @@ _xfs_buf_lookup_pages(
* handle buffer allocation failures we can't do much.
*/
if (!(++retries % 100))
- printk(KERN_ERR
- "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
__func__, gfp_mask);
XFS_STATS_INC(xb_page_retries);
@@ -413,52 +364,44 @@ _xfs_buf_lookup_pages(
XFS_STATS_INC(xb_page_found);
- nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+ nbytes = min_t(size_t, size, PAGE_SIZE - offset);
size -= nbytes;
-
- ASSERT(!PagePrivate(page));
- if (!PageUptodate(page)) {
- page_count--;
- if (blocksize >= PAGE_CACHE_SIZE) {
- if (flags & XBF_READ)
- bp->b_flags |= _XBF_PAGE_LOCKED;
- } else if (!PagePrivate(page)) {
- if (test_page_region(page, offset, nbytes))
- page_count++;
- }
- }
-
bp->b_pages[i] = page;
offset = 0;
}
+ return 0;
- if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- }
-
- if (page_count == bp->b_page_count)
- bp->b_flags |= XBF_DONE;
-
+out_free_pages:
+ for (i = 0; i < bp->b_page_count; i++)
+ __free_page(bp->b_pages[i]);
return error;
}
/*
- * Map buffer into kernel address-space if nessecary.
+ * Map buffer into kernel address-space if necessary.
*/
STATIC int
_xfs_buf_map_pages(
xfs_buf_t *bp,
uint flags)
{
- /* A single page buffer is always mappable */
+ ASSERT(bp->b_flags & _XBF_PAGES);
if (bp->b_page_count == 1) {
+ /* A single page buffer is always mappable */
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->b_flags |= XBF_MAPPED;
} else if (flags & XBF_MAPPED) {
- bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
- -1, PAGE_KERNEL);
- if (unlikely(bp->b_addr == NULL))
+ int retried = 0;
+
+ do {
+ bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+ -1, PAGE_KERNEL);
+ if (bp->b_addr)
+ break;
+ vm_unmap_aliases();
+ } while (retried++ <= 1);
+
+ if (!bp->b_addr)
return -ENOMEM;
bp->b_addr += bp->b_offset;
bp->b_flags |= XBF_MAPPED;
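
The retry loop added to _xfs_buf_map_pages() above handles vmap-space fragmentation: vm_map_ram() can fail while lazily-freed mappings still occupy address space, and vm_unmap_aliases() purges them. A standalone sketch of the same bounded retry, with the function name as an illustrative stand-in:

#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *map_pages_retry(struct page **pages, unsigned int count)
{
        int retried = 0;
        void *addr;

        do {
                addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
                if (addr)
                        return addr;
                /* purge lazily-freed mappings to reclaim vmap space */
                vm_unmap_aliases();
        } while (retried++ <= 1);

        return NULL;
}
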
@@ -569,9 +512,14 @@ found:
}
}
+ /*
+ * if the buffer is stale, clear all the external state associated with
+ * it. We need to keep flags such as how we allocated the buffer memory
+ * intact here.
+ */
if (bp->b_flags & XBF_STALE) {
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- bp->b_flags &= XBF_MAPPED;
+ bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -592,7 +540,7 @@ xfs_buf_get(
xfs_buf_flags_t flags)
{
xfs_buf_t *bp, *new_bp;
- int error = 0, i;
+ int error = 0;
new_bp = xfs_buf_allocate(flags);
if (unlikely(!new_bp))
@@ -600,7 +548,7 @@ xfs_buf_get(
bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
if (bp == new_bp) {
- error = _xfs_buf_lookup_pages(bp, flags);
+ error = xfs_buf_allocate_memory(bp, flags);
if (error)
goto no_buffer;
} else {
@@ -609,14 +557,11 @@ xfs_buf_get(
return NULL;
}
- for (i = 0; i < bp->b_page_count; i++)
- mark_page_accessed(bp->b_pages[i]);
-
if (!(bp->b_flags & XBF_MAPPED)) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
- printk(KERN_WARNING "%s: failed to map pages\n",
- __func__);
+ xfs_warn(target->bt_mount,
+ "%s: failed to map pages", __func__);
goto no_buffer;
}
}
@@ -710,10 +655,7 @@ xfs_buf_readahead(
xfs_off_t ioff,
size_t isize)
{
- struct backing_dev_info *bdi;
-
- bdi = target->bt_mapping->backing_dev_info;
- if (bdi_read_congested(bdi))
+ if (bdi_read_congested(target->bt_bdi))
return;
xfs_buf_read(target, ioff, isize,
@@ -791,10 +733,10 @@ xfs_buf_associate_memory(
size_t buflen;
int page_count;
- pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+ pageaddr = (unsigned long)mem & PAGE_MASK;
offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_CACHE_ALIGN(len + offset);
- page_count = buflen >> PAGE_CACHE_SHIFT;
+ buflen = PAGE_ALIGN(len + offset);
+ page_count = buflen >> PAGE_SHIFT;
/* Free any previous set of page pointers */
if (bp->b_pages)
@@ -811,13 +753,12 @@ xfs_buf_associate_memory(
for (i = 0; i < bp->b_page_count; i++) {
bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_CACHE_SIZE;
+ pageaddr += PAGE_SIZE;
}
bp->b_count_desired = len;
bp->b_buffer_length = buflen;
bp->b_flags |= XBF_MAPPED;
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
return 0;
}
@@ -850,8 +791,8 @@ xfs_buf_get_uncached(
error = _xfs_buf_map_pages(bp, XBF_MAPPED);
if (unlikely(error)) {
- printk(KERN_WARNING "%s: failed to map pages\n",
- __func__);
+ xfs_warn(target->bt_mount,
+ "%s: failed to map pages", __func__);
goto fail_free_mem;
}
@@ -924,20 +865,7 @@ xfs_buf_rele(
/*
- * Mutual exclusion on buffers. Locking model:
- *
- * Buffers associated with inodes for which buffer locking
- * is not enabled are not protected by semaphores, and are
- * assumed to be exclusively owned by the caller. There is a
- * spinlock in the buffer, used by the caller when concurrent
- * access is possible.
- */
-
-/*
- * Locks a buffer object, if it is not already locked. Note that this in
- * no way locks the underlying pages, so it is only useful for
- * synchronizing concurrent use of buffer objects, not for synchronizing
- * independent access to the underlying pages.
+ * Lock a buffer object, if it is not already locked.
*
* If we come across a stale, pinned, locked buffer, we know that we are
* being asked to lock a buffer that has been reallocated. Because it is
@@ -971,10 +899,7 @@ xfs_buf_lock_value(
}
/*
- * Locks a buffer object.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
+ * Lock a buffer object.
*
* If we come across a stale, pinned, locked buffer, we know that we
* are being asked to lock a buffer that has been reallocated. Because
@@ -990,8 +915,6 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
down(&bp->b_sema);
XB_SET_OWNER(bp);
@@ -1035,9 +958,7 @@ xfs_buf_wait_unpin(
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&bp->b_pin_count) == 0)
break;
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- schedule();
+ io_schedule();
}
remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
@@ -1249,10 +1170,8 @@ _xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
xfs_buf_ioend(bp, schedule);
- }
}
STATIC void
@@ -1261,35 +1180,12 @@ xfs_buf_bio_end_io(
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
- unsigned int blocksize = bp->b_target->bt_bsize;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
xfs_buf_ioerror(bp, -error);
if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
- do {
- struct page *page = bvec->bv_page;
-
- ASSERT(!PagePrivate(page));
- if (unlikely(bp->b_error)) {
- if (bp->b_flags & XBF_READ)
- ClearPageUptodate(page);
- } else if (blocksize >= PAGE_CACHE_SIZE) {
- SetPageUptodate(page);
- } else if (!PagePrivate(page) &&
- (bp->b_flags & _XBF_PAGE_CACHE)) {
- set_page_region(page, bvec->bv_offset, bvec->bv_len);
- }
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
- if (bp->b_flags & _XBF_PAGE_LOCKED)
- unlock_page(page);
- } while (bvec >= bio->bi_io_vec);
-
_xfs_buf_ioend(bp, 1);
bio_put(bio);
}
@@ -1303,7 +1199,6 @@ _xfs_buf_ioapply(
int offset = bp->b_offset;
int size = bp->b_count_desired;
sector_t sector = bp->b_bn;
- unsigned int blocksize = bp->b_target->bt_bsize;
total_nr_pages = bp->b_page_count;
map_i = 0;
@@ -1324,29 +1219,6 @@ _xfs_buf_ioapply(
(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
}
- /* Special code path for reading a sub page size buffer in --
- * we populate up the whole page, and hence the other metadata
- * in the same page. This optimization is only valid when the
- * filesystem block size is not smaller than the page size.
- */
- if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
- ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
- (XBF_READ|_XBF_PAGE_LOCKED)) &&
- (blocksize >= PAGE_CACHE_SIZE)) {
- bio = bio_alloc(GFP_NOIO, 1);
-
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector - (offset >> BBSHIFT);
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
- bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
- size = 0;
-
- atomic_inc(&bp->b_io_remaining);
-
- goto submit_io;
- }
next_chunk:
atomic_inc(&bp->b_io_remaining);
@@ -1360,8 +1232,9 @@ next_chunk:
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
+
for (; size && nr_pages; nr_pages--, map_i++) {
- int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
+ int rbytes, nbytes = PAGE_SIZE - offset;
if (nbytes > size)
nbytes = size;
@@ -1376,7 +1249,6 @@ next_chunk:
total_nr_pages--;
}
-submit_io:
if (likely(bio->bi_size)) {
if (xfs_buf_is_vmapped(bp)) {
flush_kernel_vmap_range(bp->b_addr,
@@ -1386,18 +1258,7 @@ submit_io:
if (size)
goto next_chunk;
} else {
- /*
- * if we get here, no pages were added to the bio. However,
- * we can't just error out here - if the pages are locked then
- * we have to unlock them otherwise we can hang on a later
- * access to the page.
- */
xfs_buf_ioerror(bp, EIO);
- if (bp->b_flags & _XBF_PAGE_LOCKED) {
- int i;
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- }
bio_put(bio);
}
}
@@ -1442,8 +1303,6 @@ xfs_buf_iowait(
{
trace_xfs_buf_iowait(bp, _RET_IP_);
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1461,8 +1320,8 @@ xfs_buf_offset(
return XFS_BUF_PTR(bp) + offset;
offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
- return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
+ page = bp->b_pages[offset >> PAGE_SHIFT];
+ return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}
/*
@@ -1484,9 +1343,9 @@ xfs_buf_iomove(
page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
cpoff = xfs_buf_poff(boff + bp->b_offset);
csize = min_t(size_t,
- PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
+ PAGE_SIZE-cpoff, bp->b_count_desired-boff);
- ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+ ASSERT(((csize + cpoff) <= PAGE_SIZE));
switch (mode) {
case XBRW_ZERO:
@@ -1599,7 +1458,6 @@ xfs_free_buftarg(
xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
- iput(btp->bt_mapping->host);
kthread_stop(btp->bt_task);
kmem_free(btp);
@@ -1617,21 +1475,12 @@ xfs_setsize_buftarg_flags(
btp->bt_smask = sectorsize - 1;
if (set_blocksize(btp->bt_bdev, sectorsize)) {
- printk(KERN_WARNING
- "XFS: Cannot set_blocksize to %u on device %s\n",
+ xfs_warn(btp->bt_mount,
+ "Cannot set_blocksize to %u on device %s",
sectorsize, XFS_BUFTARG_NAME(btp));
return EINVAL;
}
- if (verbose &&
- (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
- printk(KERN_WARNING
- "XFS: %u byte sectors in use on device %s. "
- "This is suboptimal; %u or greater is ideal.\n",
- sectorsize, XFS_BUFTARG_NAME(btp),
- (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
- }
-
return 0;
}
@@ -1646,7 +1495,7 @@ xfs_setsize_buftarg_early(
struct block_device *bdev)
{
return xfs_setsize_buftarg_flags(btp,
- PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
+ PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}
int
@@ -1659,41 +1508,6 @@ xfs_setsize_buftarg(
}
STATIC int
-xfs_mapping_buftarg(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- struct backing_dev_info *bdi;
- struct inode *inode;
- struct address_space *mapping;
- static const struct address_space_operations mapping_aops = {
- .sync_page = block_sync_page,
- .migratepage = fail_migrate_page,
- };
-
- inode = new_inode(bdev->bd_inode->i_sb);
- if (!inode) {
- printk(KERN_WARNING
- "XFS: Cannot allocate mapping inode for device %s\n",
- XFS_BUFTARG_NAME(btp));
- return ENOMEM;
- }
- inode->i_ino = get_next_ino();
- inode->i_mode = S_IFBLK;
- inode->i_bdev = bdev;
- inode->i_rdev = bdev->bd_dev;
- bdi = blk_get_backing_dev_info(bdev);
- if (!bdi)
- bdi = &default_backing_dev_info;
- mapping = &inode->i_data;
- mapping->a_ops = &mapping_aops;
- mapping->backing_dev_info = bdi;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- btp->bt_mapping = mapping;
- return 0;
-}
-
-STATIC int
xfs_alloc_delwrite_queue(
xfs_buftarg_t *btp,
const char *fsname)
@@ -1721,12 +1535,14 @@ xfs_alloc_buftarg(
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
+ btp->bt_bdi = blk_get_backing_dev_info(bdev);
+ if (!btp->bt_bdi)
+ goto error;
+
INIT_LIST_HEAD(&btp->bt_lru);
spin_lock_init(&btp->bt_lru_lock);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
- if (xfs_mapping_buftarg(btp, bdev))
- goto error;
if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
btp->bt_shrinker.shrink = xfs_buftarg_shrink;
@@ -1923,8 +1739,8 @@ xfsbufd(
do {
long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
- int count = 0;
struct list_head tmp;
+ struct blk_plug plug;
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1940,16 +1756,15 @@ xfsbufd(
xfs_buf_delwri_split(target, &tmp, age);
list_sort(NULL, &tmp, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp)) {
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_bdstrat_cb(bp);
- count++;
}
- if (count)
- blk_run_address_space(target->bt_mapping);
-
+ blk_finish_plug(&plug);
} while (!kthread_should_stop());
return 0;
@@ -1969,6 +1784,7 @@ xfs_flush_buftarg(
int pincount = 0;
LIST_HEAD(tmp_list);
LIST_HEAD(wait_list);
+ struct blk_plug plug;
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1983,6 +1799,8 @@ xfs_flush_buftarg(
* we do that after issuing all the IO.
*/
list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp_list)) {
bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
@@ -1993,10 +1811,10 @@ xfs_flush_buftarg(
}
xfs_bdstrat_cb(bp);
}
+ blk_finish_plug(&plug);
if (wait) {
- /* Expedite and wait for IO to complete. */
- blk_run_address_space(target->bt_mapping);
+ /* Wait for IO to complete. */
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
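
Several hunks above drop blk_run_address_space() in favour of explicit plugging: the submitter batches requests inside a plug and the block layer dispatches them when the plug is finished. The shape of that conversion, with struct my_buf and submit_one() as hypothetical stand-ins:

#include <linux/blkdev.h>

struct my_buf;
extern void submit_one(struct my_buf *bp);

static void submit_batch(struct my_buf **bufs, int count)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < count; i++)
                submit_one(bufs[i]);
        blk_finish_plug(&plug);         /* dispatches the batched I/O */
}
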
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index cbe6595..a9a1c45 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -61,30 +61,11 @@ typedef enum {
#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
/* flags used only internally */
-#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
+#define _XBF_KMEM (1 << 20)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
-/*
- * Special flag for supporting metadata blocks smaller than a FSB.
- *
- * In this case we can have multiple xfs_buf_t on a single page and
- * need to lock out concurrent xfs_buf_t readers as they only
- * serialise access to the buffer.
- *
- * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
- * between reads of the page. Hence we can have one thread read the
- * page and modify it, but then race with another thread that thinks
- * the page is not up-to-date and hence reads it again.
- *
- * The result is that the first modifcation to the page is lost.
- * This sort of AGF/AGI reading race can happen when unlinking inodes
- * that require truncation and results in the AGI unlinked list
- * modifications being lost.
- */
-#define _XBF_PAGE_LOCKED (1 << 22)
-
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
@@ -100,12 +81,10 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_LOCK, "LOCK" }, /* should never be set */\
{ XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
{ XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
- { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
- { _XBF_DELWRI_Q, "DELWRI_Q" }, \
- { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }
-
+ { _XBF_KMEM, "KMEM" }, \
+ { _XBF_DELWRI_Q, "DELWRI_Q" }
typedef enum {
XBT_FORCE_SLEEP = 0,
@@ -120,7 +99,7 @@ typedef struct xfs_bufhash {
typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
- struct address_space *bt_mapping;
+ struct backing_dev_info *bt_bdi;
struct xfs_mount *bt_mount;
unsigned int bt_bsize;
unsigned int bt_sshift;
@@ -139,17 +118,6 @@ typedef struct xfs_buftarg {
unsigned int bt_lru_nr;
} xfs_buftarg_t;
-/*
- * xfs_buf_t: Buffer structure for pagecache-based buffers
- *
- * This buffer structure is used by the pagecache buffer management routines
- * to refer to an assembly of pages forming a logical buffer.
- *
- * The buffer structure is used on a temporary basis only, and discarded when
- * released. The real data storage is recorded in the pagecache. Buffers are
- * hashed to the block device on which the file system resides.
- */
-
struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index a55c1b4..f4213ba 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -381,7 +381,7 @@ xfs_aio_write_isize_update(
/*
* If this was a direct or synchronous I/O that failed (such as ENOSPC) then
- * part of the I/O may have been written to disk before the error occured. In
+ * part of the I/O may have been written to disk before the error occurred. In
* this case the on-disk file size may have been adjusted beyond the in-memory
* file size and now needs to be truncated back.
*/
@@ -896,6 +896,7 @@ xfs_file_fallocate(
xfs_flock64_t bf;
xfs_inode_t *ip = XFS_I(inode);
int cmd = XFS_IOC_RESVSP;
+ int attr_flags = XFS_ATTR_NOLOCK;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@@ -918,7 +919,10 @@ xfs_file_fallocate(
goto out_unlock;
}
- error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
+ if (file->f_flags & O_DSYNC)
+ attr_flags |= XFS_ATTR_SYNC;
+
+ error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
if (error)
goto out_unlock;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 0ca0e3c..acca2c5 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -624,6 +624,10 @@ xfs_ioc_space(
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= XFS_ATTR_NONBLOCK;
+
+ if (filp->f_flags & O_DSYNC)
+ attr_flags |= XFS_ATTR_SYNC;
+
if (ioflags & IO_INVIS)
attr_flags |= XFS_ATTR_DMI;
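
Both call sites above (fallocate and the XFS_IOC_RESVSP ioctl path) now derive the synchronous-behaviour flag from the open file's flags. The mapping amounts to a small helper like this hedged sketch; the MY_ATTR_* values stand in for the real XFS_ATTR_* flags:

#include <linux/fs.h>

#define MY_ATTR_NONBLOCK        0x01    /* stand-in for XFS_ATTR_NONBLOCK */
#define MY_ATTR_SYNC            0x02    /* stand-in for XFS_ATTR_SYNC */

static int attr_flags_for_file(struct file *filp)
{
        int attr_flags = 0;

        if (filp->f_flags & (O_NDELAY | O_NONBLOCK))
                attr_flags |= MY_ATTR_NONBLOCK;
        if (filp->f_flags & O_DSYNC)
                attr_flags |= MY_ATTR_SYNC;     /* commit metadata synchronously */

        return attr_flags;
}
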
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 9ff7fc6..dd21784 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -70,7 +70,7 @@ xfs_synchronize_times(
/*
* If the linux inode is valid, mark it dirty.
- * Used when commiting a dirty inode into a transaction so that
+ * Used when committing a dirty inode into a transaction so that
* the inode will get written back by the linux code
*/
void
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 0964949..244be9c 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -39,7 +39,6 @@
#include <mrlock.h>
#include <time.h>
-#include <support/debug.h>
#include <support/uuid.h>
#include <linux/semaphore.h>
@@ -86,6 +85,7 @@
#include <xfs_aops.h>
#include <xfs_super.h>
#include <xfs_buf.h>
+#include <xfs_message.h>
/*
* Feature macros (disable/enable)
@@ -280,4 +280,25 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#define __arch_pack
#endif
+#define ASSERT_ALWAYS(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+#ifndef DEBUG
+#define ASSERT(expr) ((void)0)
+
+#ifndef STATIC
+# define STATIC static noinline
+#endif
+
+#else /* DEBUG */
+
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+#ifndef STATIC
+# define STATIC noinline
+#endif
+
+#endif /* DEBUG */
+
#endif /* __XFS_LINUX__ */
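
A short usage sketch for the macros the hunk above moves into xfs_linux.h: ASSERT() compiles to nothing in non-DEBUG builds, ASSERT_ALWAYS() is always checked, and under DEBUG the static qualifier is dropped from STATIC (leaving plain noinline) so the symbols stay visible in stack traces. The example function is hypothetical:

#include <linux/list.h>

STATIC int
example_insert(struct list_head *head, struct list_head *item)
{
        ASSERT(head != NULL);           /* compiled out unless DEBUG */
        ASSERT_ALWAYS(item != NULL);    /* checked in every build */
        list_add_tail(item, head);
        return 0;
}
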
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
new file mode 100644
index 0000000..9f76cce
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+
+/*
+ * XFS logging functions
+ */
+static void
+__xfs_printk(
+ const char *level,
+ const struct xfs_mount *mp,
+ struct va_format *vaf)
+{
+ if (mp && mp->m_fsname) {
+ printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ return;
+ }
+ printk("%sXFS: %pV\n", level, vaf);
+}
+
+void xfs_printk(
+ const char *level,
+ const struct xfs_mount *mp,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ __xfs_printk(level, mp, &vaf);
+ va_end(args);
+}
+
+#define define_xfs_printk_level(func, kern_level) \
+void func(const struct xfs_mount *mp, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ __xfs_printk(kern_level, mp, &vaf); \
+ va_end(args); \
+} \
+
+define_xfs_printk_level(xfs_emerg, KERN_EMERG);
+define_xfs_printk_level(xfs_alert, KERN_ALERT);
+define_xfs_printk_level(xfs_crit, KERN_CRIT);
+define_xfs_printk_level(xfs_err, KERN_ERR);
+define_xfs_printk_level(xfs_warn, KERN_WARNING);
+define_xfs_printk_level(xfs_notice, KERN_NOTICE);
+define_xfs_printk_level(xfs_info, KERN_INFO);
+#ifdef DEBUG
+define_xfs_printk_level(xfs_debug, KERN_DEBUG);
+#endif
+
+void
+xfs_alert_tag(
+ const struct xfs_mount *mp,
+ int panic_tag,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int do_panic = 0;
+
+ if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
+ xfs_printk(KERN_ALERT, mp,
+ "Transforming an alert into a BUG.");
+ do_panic = 1;
+ }
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ __xfs_printk(KERN_ALERT, mp, &vaf);
+ va_end(args);
+
+ BUG_ON(do_panic);
+}
+
+void
+assfail(char *expr, char *file, int line)
+{
+ xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d",
+ expr, file, line);
+ BUG();
+}
+
+void
+xfs_hex_dump(void *p, int length)
+{
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+}
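
A brief usage sketch for the new helpers, assuming an initialised struct xfs_mount *mp. Callers pass bare format strings; the "XFS (<fsname>): " prefix and the trailing newline are added once in __xfs_printk() via the %pV/va_format indirection:

static void report_example(struct xfs_mount *mp, int errors)
{
        /* emits e.g. "XFS (sda1): 3 errors found during scan" */
        xfs_warn(mp, "%d errors found during scan", errors);

        /* a NULL mount falls back to the bare "XFS: " prefix */
        xfs_notice(NULL, "no mount context available yet");
}
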
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h
new file mode 100644
index 0000000..f1b3fc1
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_message.h
@@ -0,0 +1,40 @@
+#ifndef __XFS_MESSAGE_H
+#define __XFS_MESSAGE_H 1
+
+struct xfs_mount;
+
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+#ifdef DEBUG
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+#else
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
+#endif
+
+extern void assfail(char *expr, char *f, int l);
+
+extern void xfs_hex_dump(void *p, int length);
+
+#endif /* __XFS_MESSAGE_H */
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9731898..b38e58d 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -173,6 +173,15 @@ xfs_parseargs(
__uint8_t iosizelog = 0;
/*
+ * set up the mount name first so all the errors will refer to the
+ * correct device.
+ */
+ mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
+ if (!mp->m_fsname)
+ return ENOMEM;
+ mp->m_fsname_len = strlen(mp->m_fsname) + 1;
+
+ /*
* Copy binary VFS mount flags we are interested in.
*/
if (sb->s_flags & MS_RDONLY)
@@ -189,6 +198,7 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ mp->m_flags |= XFS_MOUNT_DELAYLOG;
/*
* These can be overridden by the mount option parsing.
@@ -207,24 +217,21 @@ xfs_parseargs(
if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_logbufs = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_logbsize = suffix_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
@@ -232,14 +239,12 @@ xfs_parseargs(
if (!mp->m_logname)
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_MTPT)) {
- cmn_err(CE_WARN,
- "XFS: %s option not allowed on this system",
+ xfs_warn(mp, "%s option not allowed on this system",
this_char);
return EINVAL;
} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
@@ -248,8 +253,7 @@ xfs_parseargs(
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
@@ -257,8 +261,7 @@ xfs_parseargs(
iosizelog = ffs(iosize) - 1;
} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
@@ -280,16 +283,14 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_SWALLOC;
} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
dsunit = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
+ xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
@@ -297,8 +298,7 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
- cmn_err(CE_WARN,
- "XFS: %s option not allowed on this system",
+ xfs_warn(mp, "%s option not allowed on this system",
this_char);
return EINVAL;
#endif
@@ -356,20 +356,19 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
} else if (!strcmp(this_char, "ihashsize")) {
- cmn_err(CE_WARN,
- "XFS: ihashsize no longer used, option is deprecated.");
+ xfs_warn(mp,
+ "ihashsize no longer used, option is deprecated.");
} else if (!strcmp(this_char, "osyncisdsync")) {
- cmn_err(CE_WARN,
- "XFS: osyncisdsync has no effect, option is deprecated.");
+ xfs_warn(mp,
+ "osyncisdsync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "osyncisosync")) {
- cmn_err(CE_WARN,
- "XFS: osyncisosync has no effect, option is deprecated.");
+ xfs_warn(mp,
+ "osyncisosync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "irixsgid")) {
- cmn_err(CE_WARN,
- "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
+ xfs_warn(mp,
+ "irixsgid is now a sysctl(2) variable, option is deprecated.");
} else {
- cmn_err(CE_WARN,
- "XFS: unknown mount option [%s].", this_char);
+ xfs_warn(mp, "unknown mount option [%s].", this_char);
return EINVAL;
}
}
@@ -379,40 +378,37 @@ xfs_parseargs(
*/
if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only.");
+ xfs_warn(mp, "no-recovery mounts must be read-only.");
return EINVAL;
}
if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
- cmn_err(CE_WARN,
- "XFS: sunit and swidth options incompatible with the noalign option");
+ xfs_warn(mp,
+ "sunit and swidth options incompatible with the noalign option");
return EINVAL;
}
#ifndef CONFIG_XFS_QUOTA
if (XFS_IS_QUOTA_RUNNING(mp)) {
- cmn_err(CE_WARN,
- "XFS: quota support not available in this kernel.");
+ xfs_warn(mp, "quota support not available in this kernel.");
return EINVAL;
}
#endif
if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
(mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
- cmn_err(CE_WARN,
- "XFS: cannot mount with both project and group quota");
+ xfs_warn(mp, "cannot mount with both project and group quota");
return EINVAL;
}
if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
- cmn_err(CE_WARN,
- "XFS: sunit and swidth must be specified together");
+ xfs_warn(mp, "sunit and swidth must be specified together");
return EINVAL;
}
if (dsunit && (dswidth % dsunit != 0)) {
- cmn_err(CE_WARN,
- "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
+ xfs_warn(mp,
+ "stripe width (%d) must be a multiple of the stripe unit (%d)",
dswidth, dsunit);
return EINVAL;
}
@@ -438,8 +434,7 @@ done:
mp->m_logbufs != 0 &&
(mp->m_logbufs < XLOG_MIN_ICLOGS ||
mp->m_logbufs > XLOG_MAX_ICLOGS)) {
- cmn_err(CE_WARN,
- "XFS: invalid logbufs value: %d [not %d-%d]",
+ xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
return XFS_ERROR(EINVAL);
}
@@ -448,22 +443,16 @@ done:
(mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
!is_power_of_2(mp->m_logbsize))) {
- cmn_err(CE_WARN,
- "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+ xfs_warn(mp,
+ "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
mp->m_logbsize);
return XFS_ERROR(EINVAL);
}
- mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
- if (!mp->m_fsname)
- return ENOMEM;
- mp->m_fsname_len = strlen(mp->m_fsname) + 1;
-
if (iosizelog) {
if (iosizelog > XFS_MAX_IO_LOG ||
iosizelog < XFS_MIN_IO_LOG) {
- cmn_err(CE_WARN,
- "XFS: invalid log iosize: %d [not %d-%d]",
+ xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
iosizelog, XFS_MIN_IO_LOG,
XFS_MAX_IO_LOG);
return XFS_ERROR(EINVAL);
@@ -610,7 +599,7 @@ xfs_blkdev_get(
mp);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
- printk("XFS: Invalid device [%s], error=%d\n", name, error);
+ xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
return -error;
@@ -664,23 +653,23 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
int error;
if (mp->m_logdev_targp != mp->m_ddev_targp) {
- xfs_fs_cmn_err(CE_NOTE, mp,
+ xfs_notice(mp,
"Disabling barriers, not supported with external log device");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
return;
}
if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, underlying device is readonly");
+ xfs_notice(mp,
+ "Disabling barriers, underlying device is readonly");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
return;
}
error = xfs_barrier_test(mp);
if (error) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, trial barrier write failed");
+ xfs_notice(mp,
+ "Disabling barriers, trial barrier write failed");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
return;
}
@@ -743,8 +732,8 @@ xfs_open_devices(
goto out_close_logdev;
if (rtdev == ddev || rtdev == logdev) {
- cmn_err(CE_WARN,
- "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
+ xfs_warn(mp,
+ "Cannot mount filesystem with identical rtdev and ddev/logdev.");
error = EINVAL;
goto out_close_rtdev;
}
@@ -827,75 +816,6 @@ xfs_setup_devices(
return 0;
}
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
-{
- /* only ever move the target forwards */
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
- ailp->xa_target = threshold_lsn;
- wake_up_process(ailp->xa_task);
- }
-}
-
-STATIC int
-xfsaild(
- void *data)
-{
- struct xfs_ail *ailp = data;
- xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0; /* milliseconds */
-
- while (!kthread_should_stop()) {
- /*
- * for short sleeps indicating congestion, don't allow us to
- * get woken early. Otherwise all we do is bang on the AIL lock
- * without making progress.
- */
- if (tout && tout <= 20)
- __set_current_state(TASK_KILLABLE);
- else
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(tout ?
- msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
- /* swsusp */
- try_to_freeze();
-
- ASSERT(ailp->xa_mount->m_log);
- if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
- continue;
-
- tout = xfsaild_push(ailp, &last_pushed_lsn);
- }
-
- return 0;
-} /* xfsaild */
-
-int
-xfsaild_start(
- struct xfs_ail *ailp)
-{
- ailp->xa_target = 0;
- ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
- ailp->xa_mount->m_fsname);
- if (IS_ERR(ailp->xa_task))
- return -PTR_ERR(ailp->xa_task);
- return 0;
-}
-
-void
-xfsaild_stop(
- struct xfs_ail *ailp)
-{
- kthread_stop(ailp->xa_task);
-}
-
-
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
@@ -1089,7 +1009,7 @@ xfs_fs_write_inode(
error = 0;
goto out_unlock;
}
- error = xfs_iflush(ip, 0);
+ error = xfs_iflush(ip, SYNC_TRYLOCK);
}
out_unlock:
@@ -1202,22 +1122,12 @@ xfs_fs_sync_fs(
return -error;
if (laptop_mode) {
- int prev_sync_seq = mp->m_sync_seq;
-
/*
* The disk must be active because we're syncing.
* We schedule xfssyncd now (now that the disk is
* active) instead of later (when it might not be).
*/
- wake_up_process(mp->m_sync_task);
- /*
- * We have to wait for the sync iteration to complete.
- * If we don't, the disk activity caused by the sync
- * will come after the sync is completed, and that
- * triggers another sync from laptop mode.
- */
- wait_event(mp->m_wait_single_sync_task,
- mp->m_sync_seq != prev_sync_seq);
+ flush_delayed_work_sync(&mp->m_sync_work);
}
return 0;
@@ -1345,8 +1255,8 @@ xfs_fs_remount(
* options that we can't actually change.
*/
#if 0
- printk(KERN_INFO
- "XFS: mount option \"%s\" not supported for remount\n", p);
+ xfs_info(mp,
+ "mount option \"%s\" not supported for remount", p);
return -EINVAL;
#else
break;
@@ -1367,8 +1277,7 @@ xfs_fs_remount(
if (mp->m_update_flags) {
error = xfs_mount_log_sb(mp, mp->m_update_flags);
if (error) {
- cmn_err(CE_WARN,
- "XFS: failed to write sb changes");
+ xfs_warn(mp, "failed to write sb changes");
return error;
}
mp->m_update_flags = 0;
@@ -1452,15 +1361,15 @@ xfs_finish_flags(
mp->m_logbsize = mp->m_sb.sb_logsunit;
} else if (mp->m_logbsize > 0 &&
mp->m_logbsize < mp->m_sb.sb_logsunit) {
- cmn_err(CE_WARN,
- "XFS: logbuf size must be greater than or equal to log stripe size");
+ xfs_warn(mp,
+ "logbuf size must be greater than or equal to log stripe size");
return XFS_ERROR(EINVAL);
}
} else {
/* Fail a mount if the logbuf is larger than 32K */
if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
- cmn_err(CE_WARN,
- "XFS: logbuf size for version 1 logs must be 16K or 32K");
+ xfs_warn(mp,
+ "logbuf size for version 1 logs must be 16K or 32K");
return XFS_ERROR(EINVAL);
}
}
@@ -1477,8 +1386,8 @@ xfs_finish_flags(
* prohibit r/w mounts of read-only filesystems
*/
if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
- cmn_err(CE_WARN,
- "XFS: cannot mount a read-only filesystem as read-write");
+ xfs_warn(mp,
+ "cannot mount a read-only filesystem as read-write");
return XFS_ERROR(EROFS);
}
@@ -1502,9 +1411,6 @@ xfs_fs_fill_super(
spin_lock_init(&mp->m_sb_lock);
mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0);
- INIT_LIST_HEAD(&mp->m_sync_list);
- spin_lock_init(&mp->m_sync_lock);
- init_waitqueue_head(&mp->m_wait_single_sync_task);
mp->m_super = sb;
sb->s_fs_info = mp;
@@ -1551,10 +1457,14 @@ xfs_fs_fill_super(
if (error)
goto out_free_sb;
- error = xfs_mountfs(mp);
- if (error)
- goto out_filestream_unmount;
-
+ /*
+ * we must configure the block size in the superblock before we run the
+ * full mount process as the mount process can lookup and cache inodes.
+ * For the same reason we must also initialise the syncd and register
+ * the inode cache shrinker so that inodes can be reclaimed during
+ * operations like a quotacheck that iterate all inodes in the
+ * filesystem.
+ */
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1562,6 +1472,16 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
+ error = xfs_syncd_init(mp);
+ if (error)
+ goto out_filestream_unmount;
+
+ xfs_inode_shrinker_register(mp);
+
+ error = xfs_mountfs(mp);
+ if (error)
+ goto out_syncd_stop;
+
root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
@@ -1577,14 +1497,11 @@ xfs_fs_fill_super(
goto fail_vnrele;
}
- error = xfs_syncd_init(mp);
- if (error)
- goto fail_vnrele;
-
- xfs_inode_shrinker_register(mp);
-
return 0;
+ out_syncd_stop:
+ xfs_inode_shrinker_unregister(mp);
+ xfs_syncd_stop(mp);
out_filestream_unmount:
xfs_filestream_unmount(mp);
out_free_sb:
@@ -1608,6 +1525,9 @@ xfs_fs_fill_super(
}
fail_unmount:
+ xfs_inode_shrinker_unregister(mp);
+ xfs_syncd_stop(mp);
+
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
@@ -1797,6 +1717,38 @@ xfs_destroy_zones(void)
}
STATIC int __init
+xfs_init_workqueues(void)
+{
+ /*
+ * max_active is set to 8 to give enough concurrency to allow
+ * multiple work operations on each CPU to run. This allows multiple
+ * filesystems to be running sync work concurrently, and scales with
+ * the number of CPUs in the system.
+ */
+ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_syncd_wq)
+ goto out;
+
+ xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_ail_wq)
+ goto out_destroy_syncd;
+
+ return 0;
+
+out_destroy_syncd:
+ destroy_workqueue(xfs_syncd_wq);
+out:
+ return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+ destroy_workqueue(xfs_ail_wq);
+ destroy_workqueue(xfs_syncd_wq);
+}
+
+STATIC int __init
init_xfs_fs(void)
{
int error;
@@ -1811,10 +1763,14 @@ init_xfs_fs(void)
if (error)
goto out;
- error = xfs_mru_cache_init();
+ error = xfs_init_workqueues();
if (error)
goto out_destroy_zones;
+ error = xfs_mru_cache_init();
+ if (error)
+ goto out_destroy_wq;
+
error = xfs_filestream_init();
if (error)
goto out_mru_cache_uninit;
@@ -1831,6 +1787,10 @@ init_xfs_fs(void)
if (error)
goto out_cleanup_procfs;
+ error = xfs_init_workqueues();
+ if (error)
+ goto out_sysctl_unregister;
+
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
@@ -1848,6 +1808,8 @@ init_xfs_fs(void)
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
+ out_destroy_wq:
+ xfs_destroy_workqueues();
out_destroy_zones:
xfs_destroy_zones();
out:
@@ -1864,6 +1826,7 @@ exit_xfs_fs(void)
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
+ xfs_destroy_workqueues();
xfs_destroy_zones();
}
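
xfs_init_workqueues() above and the reworked init_xfs_fs() both follow the kernel's goto-unwind idiom: each stage that initialises successfully gets a teardown label, executed in reverse order when a later stage fails. A minimal sketch with hypothetical stage functions:

#include <linux/init.h>

extern int stage_a_init(void);
extern void stage_a_destroy(void);
extern int stage_b_init(void);

static int __init my_module_init(void)
{
        int error;

        error = stage_a_init();
        if (error)
                goto out;

        error = stage_b_init();
        if (error)
                goto out_destroy_a;

        return 0;

out_destroy_a:
        stage_a_destroy();      /* unwind in reverse order */
out:
        return error;
}
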
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index e22f005..e4f9c1b 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -22,6 +22,7 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
@@ -39,6 +40,8 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
/*
* The inode lookup is done in batches to keep the amount of lock traffic and
* radix tree lookups to a minimum. The batch size is a trade off between
@@ -401,7 +404,7 @@ xfs_quiesce_fs(
/*
* Second stage of a quiesce. The data is already synced, now we have to take
* care of the metadata. New transactions are already blocked, so we need to
- * wait for any remaining transactions to drain out before proceding.
+ * wait for any remaining transactions to drain out before proceeding.
*/
void
xfs_quiesce_attr(
@@ -425,69 +428,18 @@ xfs_quiesce_attr(
/* Push the superblock and write an unmount record */
error = xfs_log_sbcount(mp, 1);
if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_attr_quiesce: failed to log sb changes. "
+ xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
"Frozen image may not be consistent.");
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
}
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
- struct xfs_mount *mp,
- void *data,
- void (*syncer)(struct xfs_mount *, void *),
- struct completion *completion)
-{
- struct xfs_sync_work *work;
-
- work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
- INIT_LIST_HEAD(&work->w_list);
- work->w_syncer = syncer;
- work->w_data = data;
- work->w_mount = mp;
- work->w_completion = completion;
- spin_lock(&mp->m_sync_lock);
- list_add_tail(&work->w_list, &mp->m_sync_list);
- spin_unlock(&mp->m_sync_lock);
- wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations. At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- xfs_sync_data(mp, SYNC_TRYLOCK);
- xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
- iput(inode);
-}
-
-void
-xfs_flush_inodes(
- xfs_inode_t *ip)
+static void
+xfs_syncd_queue_sync(
+ struct xfs_mount *mp)
{
- struct inode *inode = VFS_I(ip);
- DECLARE_COMPLETION_ONSTACK(completion);
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
- wait_for_completion(&completion);
- xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+ queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+ msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
/*
@@ -497,9 +449,10 @@ xfs_flush_inodes(
*/
STATIC void
xfs_sync_worker(
- struct xfs_mount *mp,
- void *unused)
+ struct work_struct *work)
{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_sync_work);
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -509,73 +462,106 @@ xfs_sync_worker(
error = xfs_fs_log_dummy(mp);
else
xfs_log_force(mp, 0);
- xfs_reclaim_inodes(mp, 0);
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+ /* start pushing all the metadata that is currently dirty */
+ xfs_ail_push_all(mp->m_ail);
}
- mp->m_sync_seq++;
- wake_up(&mp->m_wait_single_sync_task);
+
+ /* queue us up again */
+ xfs_syncd_queue_sync(mp);
}
-STATIC int
-xfssyncd(
- void *arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+ struct xfs_mount *mp)
{
- struct xfs_mount *mp = arg;
- long timeleft;
- xfs_sync_work_t *work, *n;
- LIST_HEAD (tmp);
-
- set_freezable();
- timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
- for (;;) {
- if (list_empty(&mp->m_sync_list))
- timeleft = schedule_timeout_interruptible(timeleft);
- /* swsusp */
- try_to_freeze();
- if (kthread_should_stop() && list_empty(&mp->m_sync_list))
- break;
- spin_lock(&mp->m_sync_lock);
- /*
- * We can get woken by laptop mode, to do a sync -
- * that's the (only!) case where the list would be
- * empty with time remaining.
- */
- if (!timeleft || list_empty(&mp->m_sync_list)) {
- if (!timeleft)
- timeleft = xfs_syncd_centisecs *
- msecs_to_jiffies(10);
- INIT_LIST_HEAD(&mp->m_sync_work.w_list);
- list_add_tail(&mp->m_sync_work.w_list,
- &mp->m_sync_list);
- }
- list_splice_init(&mp->m_sync_list, &tmp);
- spin_unlock(&mp->m_sync_lock);
+ /*
+ * We can have inodes enter reclaim after we've shut down the syncd
+ * workqueue during unmount, so don't allow reclaim work to be queued
+ * during unmount.
+ */
+ if (!(mp->m_super->s_flags & MS_ACTIVE))
+ return;
- list_for_each_entry_safe(work, n, &tmp, w_list) {
- (*work->w_syncer)(mp, work->w_data);
- list_del(&work->w_list);
- if (work == &mp->m_sync_work)
- continue;
- if (work->w_completion)
- complete(work->w_completion);
- kmem_free(work);
- }
+ rcu_read_lock();
+ if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+ msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
+ rcu_read_unlock();
+}
- return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_reclaim_work);
+
+ xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+ xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations. At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ queue_work(xfs_syncd_wq, &mp->m_flush_work);
+ flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(work,
+ struct xfs_mount, m_flush_work);
+
+ xfs_sync_data(mp, SYNC_TRYLOCK);
+ xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
int
xfs_syncd_init(
struct xfs_mount *mp)
{
- mp->m_sync_work.w_syncer = xfs_sync_worker;
- mp->m_sync_work.w_mount = mp;
- mp->m_sync_work.w_completion = NULL;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
- if (IS_ERR(mp->m_sync_task))
- return -PTR_ERR(mp->m_sync_task);
+ INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+ INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+ INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+ xfs_syncd_queue_sync(mp);
+ xfs_syncd_queue_reclaim(mp);
+
return 0;
}
@@ -583,7 +569,9 @@ void
xfs_syncd_stop(
struct xfs_mount *mp)
{
- kthread_stop(mp->m_sync_task);
+ cancel_delayed_work_sync(&mp->m_sync_work);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ cancel_work_sync(&mp->m_flush_work);
}
void
@@ -602,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
spin_unlock(&ip->i_mount->m_perag_lock);
+
+ /* schedule periodic background inode reclaim */
+ xfs_syncd_queue_reclaim(ip->i_mount);
+
trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
-1, _RET_IP_);
}
@@ -762,8 +754,10 @@ xfs_reclaim_inode(
struct xfs_perag *pag,
int sync_mode)
{
- int error = 0;
+ int error;
+restart:
+ error = 0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
@@ -789,9 +783,31 @@ xfs_reclaim_inode(
if (xfs_inode_clean(ip))
goto reclaim;
- /* Now we have an inode that needs flushing */
- error = xfs_iflush(ip, sync_mode);
+ /*
+ * Now we have an inode that needs flushing.
+ *
+ * We do a nonblocking flush here even if we are doing a SYNC_WAIT
+ * reclaim as we can deadlock with inode cluster removal.
+ * xfs_ifree_cluster() can lock the inode buffer before it locks the
+ * ip->i_lock, and we are doing the exact opposite here. As a result,
+ * doing a blocking xfs_itobp() to get the cluster buffer will result
+ * in an ABBA deadlock with xfs_ifree_cluster().
+ *
+ * As xfs_ifree_cluster() must gather all inodes that are active in the
+ * cache to mark them stale, if we hit this case we don't actually want
+ * to do IO here - we want the inode marked stale so we can simply
+ * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
+ * just unlock the inode, back off and try again. Hopefully the next
+ * pass through will see the stale flag set on the inode.
+ */
+ error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
if (sync_mode & SYNC_WAIT) {
+ if (error == EAGAIN) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /* backoff longer than in xfs_ifree_cluster */
+ delay(2);
+ goto restart;
+ }
xfs_iflock(ip);
goto reclaim;
}
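The comment above is a textbook ABBA avoidance: since xfs_ifree_cluster() takes the buffer lock before ip->i_lock, this path may only *try* the second lock, and on failure must drop everything and back off before retrying. Reduced to its skeleton with hypothetical locks:

	#include <linux/mutex.h>
	#include <linux/delay.h>

	static DEFINE_MUTEX(lock_a);	/* hypothetical: taken first here */
	static DEFINE_MUTEX(lock_b);	/* hypothetical: taken first elsewhere */

	static void demo_opposite_order(void)
	{
	restart:
		mutex_lock(&lock_a);
		if (!mutex_trylock(&lock_b)) {
			/* drop everything, back off, retry: no ABBA deadlock */
			mutex_unlock(&lock_a);
			msleep(2);
			goto restart;
		}
		/* ... both locks held safely ... */
		mutex_unlock(&lock_b);
		mutex_unlock(&lock_a);
	}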
@@ -806,7 +822,7 @@ xfs_reclaim_inode(
* pass on the error.
*/
if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+ xfs_warn(ip->i_mount,
"inode 0x%llx background reclaim flush failed with %d",
(long long)ip->i_ino, error);
}
@@ -994,7 +1010,13 @@ xfs_reclaim_inodes(
}
/*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we throttle the speed of reclaim by doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
*/
static int
xfs_reclaim_inode_shrink(
@@ -1009,10 +1031,15 @@ xfs_reclaim_inode_shrink(
mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
if (nr_to_scan) {
+ /* kick background reclaimer and push the AIL */
+ xfs_syncd_queue_reclaim(mp);
+ xfs_ail_push_all(mp->m_ail);
+
if (!(gfp_mask & __GFP_FS))
return -1;
- xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+ xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+ &nr_to_scan);
/* terminate if we don't exhaust the scan */
if (nr_to_scan > 0)
return -1;
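For context, the shrinker contract of this kernel generation (matching the signature used by xfs_reclaim_inode_shrink): nr_to_scan == 0 asks only for a size estimate, nr_to_scan > 0 asks the callback to free that many objects, and returning -1 aborts the scan, which is what happens above both for !__GFP_FS callers and for an exhausted pass. A hypothetical skeleton:

	#include <linux/mm.h>

	static int demo_nr_cached;	/* hypothetical cache size */

	static int demo_shrink(struct shrinker *shrink, int nr_to_scan,
			       gfp_t gfp_mask)
	{
		if (nr_to_scan) {
			if (!(gfp_mask & __GFP_FS))
				return -1;	/* must not recurse into the fs */
			/* ... reclaim up to nr_to_scan objects, may block ... */
		}
		return demo_nr_cached;	/* size estimate for the VM */
	}

	static struct shrinker demo_shrinker = {
		.shrink	= demo_shrink,
		.seeks	= DEFAULT_SEEKS,
	};
	/* register_shrinker(&demo_shrinker) at mount,
	 * unregister_shrinker(&demo_shrinker) at unmount */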
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 32ba662..e3a6ad2 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
+extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index ee3cee0..ee2d2ad 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -37,7 +37,7 @@ xfs_stats_clear_proc_handler(
ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
if (!ret && write && *valp) {
- printk("XFS Clearing xfsstats\n");
+ xfs_notice(NULL, "Clearing xfsstats");
for_each_possible_cpu(c) {
preempt_disable();
/* save vn_active, it's a universal truth! */
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index d22aa31..6fa2146 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -544,9 +544,10 @@ xfs_qm_dqtobp(
/*
* A simple sanity check in case we got a corrupted dquot...
*/
- if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
+ error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
- "dqtobp")) {
+ "dqtobp");
+ if (error) {
if (!(flags & XFS_QMOPT_DQREPAIR)) {
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EIO);
@@ -599,7 +600,7 @@ xfs_qm_dqread(
/*
* Reservation counters are defined as reservation plus current usage
- * to avoid having to add everytime.
+ * to avoid having to add every time.
*/
dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
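Storing reservation plus usage in one counter, as the comment above notes, makes admission a single comparison instead of usage + reserved + request arithmetic on every check. Illustrative only, with hypothetical types:

	#include <linux/errno.h>
	#include <linux/types.h>

	struct demo_dquot { u64 q_res_bcount; };	/* usage + reservations */

	static int demo_reserve(struct demo_dquot *dqp, u64 nblks, u64 hardlimit)
	{
		if (dqp->q_res_bcount + nblks > hardlimit)
			return -EDQUOT;		/* would bust the hard limit */
		dqp->q_res_bcount += nblks;	/* admit the reservation */
		return 0;
	}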
@@ -827,7 +828,7 @@ xfs_qm_dqget(
if (xfs_do_dqerror) {
if ((xfs_dqerror_target == mp->m_ddev_targp) &&
(xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
- cmn_err(CE_DEBUG, "Returning error in dqget");
+ xfs_debug(mp, "Returning error in dqget");
return (EIO);
}
}
@@ -1207,8 +1208,9 @@ xfs_qm_dqflush(
/*
* A simple sanity check in case we got a corrupted dquot..
*/
- if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
- XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
+ error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+ XFS_QMOPT_DOWARN, "dqflush (incore copy)");
+ if (error) {
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
@@ -1391,8 +1393,8 @@ xfs_qm_dqpurge(
*/
error = xfs_qm_dqflush(dqp, SYNC_WAIT);
if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dqpurge: dquot %p flush failed", dqp);
+ xfs_warn(mp, "%s: dquot %p flush failed",
+ __func__, dqp);
xfs_dqflock(dqp);
}
ASSERT(atomic_read(&dqp->q_pincount) == 0);
@@ -1425,36 +1427,38 @@ xfs_qm_dqpurge(
void
xfs_qm_dqprint(xfs_dquot_t *dqp)
{
- cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
- cmn_err(CE_DEBUG, "---- dquotID = %d",
+ struct xfs_mount *mp = dqp->q_mount;
+
+ xfs_debug(mp, "-----------KERNEL DQUOT----------------");
+ xfs_debug(mp, "---- dquotID = %d",
(int)be32_to_cpu(dqp->q_core.d_id));
- cmn_err(CE_DEBUG, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
- cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount);
- cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno);
- cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
- cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)",
+ xfs_debug(mp, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
+ xfs_debug(mp, "---- fs = 0x%p", dqp->q_mount);
+ xfs_debug(mp, "---- blkno = 0x%x", (int) dqp->q_blkno);
+ xfs_debug(mp, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
+ xfs_debug(mp, "---- blkhlimit = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_blk_hardlimit),
(int)be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)",
+ xfs_debug(mp, "---- blkslimit = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_blk_softlimit),
(int)be64_to_cpu(dqp->q_core.d_blk_softlimit));
- cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)",
+ xfs_debug(mp, "---- inohlimit = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_ino_hardlimit),
(int)be64_to_cpu(dqp->q_core.d_ino_hardlimit));
- cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)",
+ xfs_debug(mp, "---- inoslimit = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_ino_softlimit),
(int)be64_to_cpu(dqp->q_core.d_ino_softlimit));
- cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
+ xfs_debug(mp, "---- bcount = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_bcount),
(int)be64_to_cpu(dqp->q_core.d_bcount));
- cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
+ xfs_debug(mp, "---- icount = %Lu (0x%x)",
be64_to_cpu(dqp->q_core.d_icount),
(int)be64_to_cpu(dqp->q_core.d_icount));
- cmn_err(CE_DEBUG, "---- btimer = %d",
+ xfs_debug(mp, "---- btimer = %d",
(int)be32_to_cpu(dqp->q_core.d_btimer));
- cmn_err(CE_DEBUG, "---- itimer = %d",
+ xfs_debug(mp, "---- itimer = %d",
(int)be32_to_cpu(dqp->q_core.d_itimer));
- cmn_err(CE_DEBUG, "---------------------------");
+ xfs_debug(mp, "---------------------------");
}
#endif
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 2a1f3dc..9e0e2fa 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -136,9 +136,8 @@ xfs_qm_dquot_logitem_push(
*/
error = xfs_qm_dqflush(dqp, 0);
if (error)
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dquot_logitem_push: push error %d on dqp %p",
- error, dqp);
+ xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+ __func__, error, dqp);
xfs_dqunlock(dqp);
}
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 206a281..69228aa 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -80,7 +80,7 @@ xfs_qm_dquot_list_print(
int i = 0;
list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) {
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" "
+ xfs_debug(mp, " %d. \"%d (%s)\" "
"bcnt = %lld, icnt = %lld, refs = %d",
i++, be32_to_cpu(dqp->q_core.d_id),
DQFLAGTO_TYPESTR(dqp),
@@ -205,7 +205,7 @@ xfs_qm_destroy(
list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+ xfs_debug(dqp->q_mount, "FREELIST destroy 0x%p", dqp);
#endif
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
@@ -341,9 +341,7 @@ xfs_qm_mount_quotas(
* quotas immediately.
*/
if (mp->m_sb.sb_rextents) {
- cmn_err(CE_NOTE,
- "Cannot turn on quotas for realtime filesystem %s",
- mp->m_fsname);
+ xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
mp->m_qflags = 0;
goto write_changes;
}
@@ -402,14 +400,13 @@ xfs_qm_mount_quotas(
* off, but the on disk superblock doesn't know that !
*/
ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
- xfs_fs_cmn_err(CE_ALERT, mp,
- "XFS mount_quotas: Superblock update failed!");
+ xfs_alert(mp, "%s: Superblock update failed!",
+ __func__);
}
}
if (error) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "Failed to initialize disk quotas.");
+ xfs_warn(mp, "Failed to initialize disk quotas.");
return;
}
@@ -464,12 +461,10 @@ xfs_qm_dqflush_all(
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl;
struct xfs_dquot *dqp;
- int niters;
int error;
if (!q)
return 0;
- niters = 0;
again:
mutex_lock(&q->qi_dqlist_lock);
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1230,13 +1225,6 @@ xfs_qm_qino_alloc(
}
/*
- * Keep an extra reference to this quota inode. This inode is
- * locked exclusively and joined to the transaction already.
- */
- ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
- IHOLD(*ip);
-
- /*
* Make the changes in the superblock, and log those too.
* sbfields arg may contain fields other than *QUOTINO;
* VERSIONNUM for example.
@@ -1264,7 +1252,7 @@ xfs_qm_qino_alloc(
xfs_mod_sb(tp, sbfields);
if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
- xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
+ xfs_alert(mp, "%s failed (error %d)!", __func__, error);
return error;
}
return 0;
@@ -1299,7 +1287,7 @@ xfs_qm_reset_dqcounts(
* output any warnings because it's perfectly possible to
* find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
*/
- (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+ (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
"xfs_quotacheck");
ddq->d_bcount = 0;
ddq->d_icount = 0;
@@ -1324,14 +1312,9 @@ xfs_qm_dqiter_bufs(
{
xfs_buf_t *bp;
int error;
- int notcommitted;
- int incr;
int type;
ASSERT(blkcnt > 0);
- notcommitted = 0;
- incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
- XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
error = 0;
@@ -1676,7 +1659,7 @@ xfs_qm_quotacheck(
*/
ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
- cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
+ xfs_notice(mp, "Quotacheck needed: Please wait.");
/*
* First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
@@ -1754,9 +1737,9 @@ xfs_qm_quotacheck(
error_return:
if (error) {
- cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
- "Disabling quotas.",
- mp->m_fsname, error);
+ xfs_warn(mp,
+ "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
+ error);
/*
* We must turn off quotas.
*/
@@ -1764,12 +1747,11 @@ xfs_qm_quotacheck(
ASSERT(xfs_Gqm != NULL);
xfs_qm_destroy_quotainfo(mp);
if (xfs_mount_reset_sbqflags(mp)) {
- cmn_err(CE_WARN, "XFS quotacheck %s: "
- "Failed to reset quota flags.", mp->m_fsname);
+ xfs_warn(mp,
+ "Quotacheck: Failed to reset quota flags.");
}
- } else {
- cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
- }
+ } else
+ xfs_notice(mp, "Quotacheck: Done.");
return (error);
}
@@ -1937,8 +1919,8 @@ again:
*/
error = xfs_qm_dqflush(dqp, 0);
if (error) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
+ xfs_warn(mp, "%s: dquot %p flush failed",
+ __func__, dqp);
}
goto dqunlock;
}
@@ -2115,7 +2097,7 @@ xfs_qm_write_sb_changes(
int error;
#ifdef QUOTADEBUG
- cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
+ xfs_notice(mp, "Writing superblock quota changes");
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if ((error = xfs_trans_reserve(tp, 0,
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index c9446f1..567b29b 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone;
* block in the dquot/xqm code.
*/
#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
typedef xfs_dqhash_t xfs_dqlist_t;
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 45b5cb1..a0a829a 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -119,8 +119,7 @@ xfs_qm_newmount(
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
(!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
- cmn_err(CE_WARN,
- "XFS: please mount with%s%s%s%s.",
+ xfs_warn(mp, "please mount with%s%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
(pquotaondisk ? " prjquota" : ""),
@@ -135,7 +134,7 @@ xfs_qm_newmount(
*/
if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/*
- * If an error occured, qm_mount_quotas code
+ * If an error occurred, qm_mount_quotas code
* has already disabled quotas. So, just finish
* mounting, and get on with the boring life
* without disk quotas.
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index bdebc18..2dadb15 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -41,12 +41,6 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
-#ifdef DEBUG
-# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
-#else
-# define qdprintk(s, args...) do { } while (0)
-#endif
-
STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
uint);
@@ -178,7 +172,7 @@ xfs_qm_scall_quotaoff(
/*
* Next we make the changes in the quota flag in the mount struct.
* This isn't protected by a particular lock directly, because we
- * don't want to take a mrlock everytime we depend on quotas being on.
+ * don't want to take a mrlock every time we depend on quotas being on.
*/
mp->m_qflags &= ~(flags);
@@ -294,7 +288,8 @@ xfs_qm_scall_trunc_qfiles(
int error = 0, error2 = 0;
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
- qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
+ xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
+ __func__, flags, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
@@ -318,20 +313,19 @@ xfs_qm_scall_quotaon(
{
int error;
uint qf;
- uint accflags;
__int64_t sbflags;
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
* Switching on quota accounting must be done at mount time.
*/
- accflags = flags & XFS_ALL_QUOTA_ACCT;
flags &= ~(XFS_ALL_QUOTA_ACCT);
sbflags = 0;
if (flags == 0) {
- qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
+ xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
+ __func__, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
@@ -352,12 +346,13 @@ xfs_qm_scall_quotaon(
(flags & XFS_GQUOTA_ACCT) == 0 &&
(mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
(flags & XFS_OQUOTA_ENFD))) {
- qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
- flags, mp->m_sb.sb_qflags);
+ xfs_debug(mp,
+ "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
+ __func__, flags, mp->m_sb.sb_qflags);
return XFS_ERROR(EINVAL);
}
/*
- * If everything's upto-date incore, then don't waste time.
+ * If everything's up-to-date incore, then don't waste time.
*/
if ((mp->m_qflags & flags) == flags)
return XFS_ERROR(EEXIST);
@@ -541,7 +536,7 @@ xfs_qm_scall_setqlim(
q->qi_bsoftlimit = soft;
}
} else {
- qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
+ xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
}
hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
@@ -557,7 +552,7 @@ xfs_qm_scall_setqlim(
q->qi_rtbsoftlimit = soft;
}
} else {
- qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+ xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
}
hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
@@ -574,7 +569,7 @@ xfs_qm_scall_setqlim(
q->qi_isoftlimit = soft;
}
} else {
- qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
+ xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
}
/*
@@ -939,10 +934,11 @@ struct mutex qcheck_lock;
#define DQTEST_LIST_PRINT(l, NXT, title) \
{ \
xfs_dqtest_t *dqp; int i = 0;\
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
+ xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \
for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
dqp = (xfs_dqtest_t *)dqp->NXT) { \
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
+ xfs_debug(dqp->q_mount, \
+ " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \
dqp->d_bcount, dqp->d_icount); } \
}
@@ -966,16 +962,17 @@ xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
}
STATIC void
xfs_qm_dqtest_print(
- xfs_dqtest_t *d)
+ struct xfs_mount *mp,
+ struct dqtest *d)
{
- cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------");
- cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id);
- cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount);
- cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
+ xfs_debug(mp, "-----------DQTEST DQUOT----------------");
+ xfs_debug(mp, "---- dquot ID = %d", d->d_id);
+ xfs_debug(mp, "---- fs = 0x%p", d->q_mount);
+ xfs_debug(mp, "---- bcount = %Lu (0x%x)",
d->d_bcount, (int)d->d_bcount);
- cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
+ xfs_debug(mp, "---- icount = %Lu (0x%x)",
d->d_icount, (int)d->d_icount);
- cmn_err(CE_DEBUG, "---------------------------");
+ xfs_debug(mp, "---------------------------");
}
STATIC void
@@ -989,12 +986,14 @@ xfs_qm_dqtest_failed(
{
qmtest_nfails++;
if (error)
- cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s",
- d->d_id, error, reason);
+ xfs_debug(dqp->q_mount,
+ "quotacheck failed id=%d, err=%d\nreason: %s",
+ d->d_id, error, reason);
else
- cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]",
- d->d_id, reason, (int)a, (int)b);
- xfs_qm_dqtest_print(d);
+ xfs_debug(dqp->q_mount,
+ "quotacheck failed id=%d (%s) [%d != %d]",
+ d->d_id, reason, (int)a, (int)b);
+ xfs_qm_dqtest_print(dqp->q_mount, d);
if (dqp)
xfs_qm_dqprint(dqp);
}
@@ -1021,9 +1020,9 @@ xfs_dqtest_cmp2(
be64_to_cpu(dqp->q_core.d_bcount) >=
be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
- cmn_err(CE_DEBUG,
- "%d [%s] [0x%p] BLK TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
+ xfs_debug(dqp->q_mount,
+ "%d [%s] BLK TIMER NOT STARTED",
+ d->d_id, DQFLAGTO_TYPESTR(d));
err++;
}
}
@@ -1031,16 +1030,16 @@ xfs_dqtest_cmp2(
be64_to_cpu(dqp->q_core.d_icount) >=
be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
- cmn_err(CE_DEBUG,
- "%d [%s] [0x%p] INO TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
+ xfs_debug(dqp->q_mount,
+ "%d [%s] INO TIMER NOT STARTED",
+ d->d_id, DQFLAGTO_TYPESTR(d));
err++;
}
}
#ifdef QUOTADEBUG
if (!err) {
- cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
+ xfs_debug(dqp->q_mount, "%d [%s] qchecked",
+ d->d_id, DQFLAGTO_TYPESTR(d));
}
#endif
return (err);
@@ -1137,8 +1136,8 @@ xfs_qm_internalqcheck_adjust(
if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
*res = BULKSTAT_RV_NOTHING;
- qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
- (unsigned long long) ino,
+ xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n",
+ __func__, (unsigned long long) ino,
(unsigned long long) mp->m_sb.sb_uquotino,
(unsigned long long) mp->m_sb.sb_gquotino);
return XFS_ERROR(EINVAL);
@@ -1223,12 +1222,12 @@ xfs_qm_internalqcheck(
xfs_qm_internalqcheck_adjust,
0, NULL, &done);
if (error) {
- cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
+ xfs_debug(mp, "Bulkstat returned error 0x%x", error);
break;
}
} while (!done);
- cmn_err(CE_DEBUG, "Checking results against system dquots");
+ xfs_debug(mp, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
xfs_dqtest_t *d, *n;
xfs_dqhash_t *h;
@@ -1246,10 +1245,10 @@ xfs_qm_internalqcheck(
}
if (qmtest_nfails) {
- cmn_err(CE_DEBUG, "******** quotacheck failed ********");
- cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
+ xfs_debug(mp, "******** quotacheck failed ********");
+ xfs_debug(mp, "failures = %d", qmtest_nfails);
} else {
- cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
+ xfs_debug(mp, "******** quotacheck successful! ********");
}
kmem_free(qmtest_udqtab);
kmem_free(qmtest_gdqtab);
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 7de91d1..2a36487 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -643,8 +643,9 @@ xfs_trans_dqresv(
(XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
(XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
- " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
+ xfs_debug(mp,
+ "BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?",
+ nblks, *resbcountp, hardlimit);
#endif
if (nblks > 0) {
/*
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
deleted file mode 100644
index 0df8889..0000000
--- a/fs/xfs/support/debug.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-#include "debug.h"
-
-/* xfs_mount.h drags a lot of crap in, sorry.. */
-#include "xfs_sb.h"
-#include "xfs_inum.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-#include "xfs_error.h"
-
-void
-cmn_err(
- const char *lvl,
- const char *fmt,
- ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%s%pV", lvl, &vaf);
- va_end(args);
-
- BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
-}
-
-void
-xfs_fs_cmn_err(
- const char *lvl,
- struct xfs_mount *mp,
- const char *fmt,
- ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf);
- va_end(args);
-
- BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
-}
-
-/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */
-void
-xfs_cmn_err(
- int panic_tag,
- const char *lvl,
- struct xfs_mount *mp,
- const char *fmt,
- ...)
-{
- struct va_format vaf;
- va_list args;
- int do_panic = 0;
-
- if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
- printk(KERN_ALERT "XFS: Transforming an alert into a BUG.");
- do_panic = 1;
- }
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf);
- va_end(args);
-
- BUG_ON(do_panic);
-}
-
-void
-assfail(char *expr, char *file, int line)
-{
- printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr,
- file, line);
- BUG();
-}
-
-void
-xfs_hex_dump(void *p, int length)
-{
- print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
-}
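The helpers deleted here are superseded by the xfs_warn()/xfs_notice()/xfs_alert() calls seen throughout this series, but the core trick carries over: format once through struct va_format and emit a single printk via %pV so prefix and message cannot interleave. A stand-in sketch with hypothetical names:

	#include <linux/kernel.h>
	#include <stdarg.h>

	struct demo_mount { const char *m_fsname; };	/* hypothetical */

	static void demo_warn(const struct demo_mount *mp, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* one printk, so the line stays atomic in the log */
		printk(KERN_WARNING "XFS (%s): %pV\n",
		       mp ? mp->m_fsname : "?", &vaf);
		va_end(args);
	}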
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
deleted file mode 100644
index 05699f6..0000000
--- a/fs/xfs/support/debug.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_DEBUG_H__
-#define __XFS_SUPPORT_DEBUG_H__
-
-#include <stdarg.h>
-
-struct xfs_mount;
-
-#define CE_DEBUG KERN_DEBUG
-#define CE_CONT KERN_INFO
-#define CE_NOTE KERN_NOTICE
-#define CE_WARN KERN_WARNING
-#define CE_ALERT KERN_ALERT
-#define CE_PANIC KERN_EMERG
-
-void cmn_err(const char *lvl, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp,
- const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
-void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp,
- const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
-
-extern void assfail(char *expr, char *f, int l);
-
-#define ASSERT_ALWAYS(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-
-#ifndef DEBUG
-#define ASSERT(expr) ((void)0)
-
-#ifndef STATIC
-# define STATIC static noinline
-#endif
-
-#else /* DEBUG */
-
-#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-
-#ifndef STATIC
-# define STATIC noinline
-#endif
-
-#endif /* DEBUG */
-#endif /* __XFS_SUPPORT_DEBUG_H__ */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index f322798..27d64d7 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -147,10 +147,9 @@ xfs_alloc_get_rec(
*/
STATIC void
xfs_alloc_compute_aligned(
+ xfs_alloc_arg_t *args, /* allocation argument structure */
xfs_agblock_t foundbno, /* starting block in found extent */
xfs_extlen_t foundlen, /* length in found extent */
- xfs_extlen_t alignment, /* alignment for allocation */
- xfs_extlen_t minlen, /* minimum length for allocation */
xfs_agblock_t *resbno, /* result block number */
xfs_extlen_t *reslen) /* result length */
{
@@ -158,8 +157,8 @@ xfs_alloc_compute_aligned(
xfs_extlen_t diff;
xfs_extlen_t len;
- if (alignment > 1 && foundlen >= minlen) {
- bno = roundup(foundbno, alignment);
+ if (args->alignment > 1 && foundlen >= args->minlen) {
+ bno = roundup(foundbno, args->alignment);
diff = bno - foundbno;
len = diff >= foundlen ? 0 : foundlen - diff;
} else {
@@ -464,6 +463,27 @@ xfs_alloc_read_agfl(
return 0;
}
+STATIC int
+xfs_alloc_update_counters(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_buf *agbp,
+ long len)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+
+ pag->pagf_freeblks += len;
+ be32_add_cpu(&agf->agf_freeblks, len);
+
+ xfs_trans_agblocks_delta(tp, len);
+ if (unlikely(be32_to_cpu(agf->agf_freeblks) >
+ be32_to_cpu(agf->agf_length)))
+ return EFSCORRUPTED;
+
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
+ return 0;
+}
+
/*
* Allocation group level functions.
*/
@@ -505,49 +525,44 @@ xfs_alloc_ag_vextent(
ASSERT(0);
/* NOTREACHED */
}
- if (error)
+
+ if (error || args->agbno == NULLAGBLOCK)
return error;
- /*
- * If the allocation worked, need to change the agf structure
- * (and log it), and the superblock.
- */
- if (args->agbno != NULLAGBLOCK) {
- xfs_agf_t *agf; /* allocation group freelist header */
- long slen = (long)args->len;
- ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
- ASSERT(!(args->wasfromfl) || !args->isfl);
- ASSERT(args->agbno % args->alignment == 0);
- if (!(args->wasfromfl)) {
-
- agf = XFS_BUF_TO_AGF(args->agbp);
- be32_add_cpu(&agf->agf_freeblks, -(args->len));
- xfs_trans_agblocks_delta(args->tp,
- -((long)(args->len)));
- args->pag->pagf_freeblks -= args->len;
- ASSERT(be32_to_cpu(agf->agf_freeblks) <=
- be32_to_cpu(agf->agf_length));
- xfs_alloc_log_agf(args->tp, args->agbp,
- XFS_AGF_FREEBLKS);
- /*
- * Search the busylist for these blocks and mark the
- * transaction as synchronous if blocks are found. This
- * avoids the need to block due to a synchronous log
- * force to ensure correct ordering as the synchronous
- * transaction will guarantee that for us.
- */
- if (xfs_alloc_busy_search(args->mp, args->agno,
- args->agbno, args->len))
- xfs_trans_set_sync(args->tp);
- }
- if (!args->isfl)
- xfs_trans_mod_sb(args->tp,
- args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
- XFS_TRANS_SB_FDBLOCKS, -slen);
- XFS_STATS_INC(xs_allocx);
- XFS_STATS_ADD(xs_allocb, args->len);
+ ASSERT(args->len >= args->minlen);
+ ASSERT(args->len <= args->maxlen);
+ ASSERT(!args->wasfromfl || !args->isfl);
+ ASSERT(args->agbno % args->alignment == 0);
+
+ if (!args->wasfromfl) {
+ error = xfs_alloc_update_counters(args->tp, args->pag,
+ args->agbp,
+ -((long)(args->len)));
+ if (error)
+ return error;
+
+ /*
+ * Search the busylist for these blocks and mark the
+ * transaction as synchronous if blocks are found. This
+ * avoids the need to block due to a synchronous log
+ * force to ensure correct ordering as the synchronous
+ * transaction will guarantee that for us.
+ */
+ if (xfs_alloc_busy_search(args->mp, args->agno,
+ args->agbno, args->len))
+ xfs_trans_set_sync(args->tp);
}
- return 0;
+
+ if (!args->isfl) {
+ xfs_trans_mod_sb(args->tp, args->wasdel ?
+ XFS_TRANS_SB_RES_FDBLOCKS :
+ XFS_TRANS_SB_FDBLOCKS,
+ -((long)(args->len)));
+ }
+
+ XFS_STATS_INC(xs_allocx);
+ XFS_STATS_ADD(xs_allocb, args->len);
+ return error;
}
/*
@@ -693,8 +708,7 @@ xfs_alloc_find_best_extent(
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
- args->minlen, &bno, slena);
+ xfs_alloc_compute_aligned(args, *sbno, *slen, &bno, slena);
/*
* The good extent is closer than this one.
@@ -866,8 +880,8 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
- args->minlen, &ltbnoa, &ltlena);
+ xfs_alloc_compute_aligned(args, ltbno, ltlen,
+ &ltbnoa, &ltlena);
if (ltlena < args->minlen)
continue;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
@@ -987,8 +1001,8 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
- args->minlen, &ltbnoa, &ltlena);
+ xfs_alloc_compute_aligned(args, ltbno, ltlen,
+ &ltbnoa, &ltlena);
if (ltlena >= args->minlen)
break;
if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
@@ -1003,8 +1017,8 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment,
- args->minlen, &gtbnoa, &gtlena);
+ xfs_alloc_compute_aligned(args, gtbno, gtlen,
+ &gtbnoa, &gtlena);
if (gtlena >= args->minlen)
break;
if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
@@ -1183,8 +1197,7 @@ xfs_alloc_ag_vextent_size(
* once aligned; if not, we search left for something better.
* This can't happen in the second case above.
*/
- xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
- &rbno, &rlen);
+ xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen), error0);
@@ -1209,8 +1222,8 @@ xfs_alloc_ag_vextent_size(
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if (flen < bestrlen)
break;
- xfs_alloc_compute_aligned(fbno, flen, args->alignment,
- args->minlen, &rbno, &rlen);
+ xfs_alloc_compute_aligned(args, fbno, flen,
+ &rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen),
@@ -1388,6 +1401,7 @@ xfs_free_ag_extent(
xfs_mount_t *mp; /* mount point struct for filesystem */
xfs_agblock_t nbno; /* new starting block of freespace */
xfs_extlen_t nlen; /* new length of freespace */
+ xfs_perag_t *pag; /* per allocation group data */
mp = tp->t_mountp;
/*
@@ -1586,30 +1600,20 @@ xfs_free_ag_extent(
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
+
/*
* Update the freespace totals in the ag and superblock.
*/
- {
- xfs_agf_t *agf;
- xfs_perag_t *pag; /* per allocation group data */
-
- pag = xfs_perag_get(mp, agno);
- pag->pagf_freeblks += len;
- xfs_perag_put(pag);
-
- agf = XFS_BUF_TO_AGF(agbp);
- be32_add_cpu(&agf->agf_freeblks, len);
- xfs_trans_agblocks_delta(tp, len);
- XFS_WANT_CORRUPTED_GOTO(
- be32_to_cpu(agf->agf_freeblks) <=
- be32_to_cpu(agf->agf_length),
- error0);
- xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
- if (!isfl)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
- XFS_STATS_INC(xs_freex);
- XFS_STATS_ADD(xs_freeb, len);
- }
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_alloc_update_counters(tp, pag, agbp, len);
+ xfs_perag_put(pag);
+ if (error)
+ goto error0;
+
+ if (!isfl)
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
+ XFS_STATS_INC(xs_freex);
+ XFS_STATS_ADD(xs_freeb, len);
trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
@@ -2391,17 +2395,33 @@ xfs_free_extent(
memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
+
+ /*
+ * validate that the block number is legal - this enables us to detect
+ * and handle silent filesystem corruption rather than crashing.
+ */
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
- ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+ if (args.agno >= args.mp->m_sb.sb_agcount)
+ return EFSCORRUPTED;
+
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+ if (args.agbno >= args.mp->m_sb.sb_agblocks)
+ return EFSCORRUPTED;
+
args.pag = xfs_perag_get(args.mp, args.agno);
- if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+ ASSERT(args.pag);
+
+ error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+ if (error)
goto error0;
-#ifdef DEBUG
- ASSERT(args.agbp != NULL);
- ASSERT((args.agbno + len) <=
- be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+ /* validate the extent size is legal now that we have the agf locked */
+ if (args.agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+ error = EFSCORRUPTED;
+ goto error0;
+ }
+
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
xfs_perag_put(args.pag);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index dc3afd77..fa00788 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2365,6 +2365,13 @@ xfs_bmap_rtalloc(
*/
if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+
+ /*
+ * Lock out other modifications to the RT bitmap inode.
+ */
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
/*
* If it's an allocation to an empty file at offset 0,
* pick an extent that will space things out in the rt area.
@@ -3519,7 +3526,7 @@ xfs_bmap_search_extents(
if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
!(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
- xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
+ xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
"Access to block zero in inode %llu "
"start_block: %llx start_off: %llx "
"blkcnt: %llx extent-state: %x lastx: %x\n",
@@ -4193,12 +4200,11 @@ xfs_bmap_read_extents(
num_recs = xfs_btree_get_numrecs(block);
if (unlikely(i + num_recs > room)) {
ASSERT(i + num_recs <= room);
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ xfs_warn(ip->i_mount,
"corrupt dinode %Lu, (btree extents).",
(unsigned long long) ip->i_ino);
- XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount);
+ XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
+ XFS_ERRLEVEL_LOW, ip->i_mount, block);
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(
@@ -5772,7 +5778,7 @@ xfs_check_block(
else
thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
if (*thispa == *pp) {
- cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
+ xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
__func__, j, i,
(unsigned long long)be64_to_cpu(*thispa));
panic("%s: ptrs are equal in node\n",
@@ -5937,11 +5943,11 @@ xfs_bmap_check_leaf_extents(
return;
error0:
- cmn_err(CE_WARN, "%s: at error0", __func__);
+ xfs_warn(mp, "%s: at error0", __func__);
if (bp_release)
xfs_trans_brelse(NULL, bp);
error_norelse:
- cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
+ xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
__func__, i);
panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
return;
@@ -6144,7 +6150,7 @@ xfs_bmap_punch_delalloc_range(
if (error) {
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ xfs_alert(ip->i_mount,
"Failed delalloc mapping lookup ino %lld fsb %lld.",
ip->i_ino, start_fsb);
}
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 6f8c21c..7b7e005 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -130,10 +130,12 @@ xfs_buf_item_log_check(
orig = bip->bli_orig;
buffer = XFS_BUF_PTR(bp);
for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
- if (orig[x] != buffer[x] && !btst(bip->bli_logged, x))
- cmn_err(CE_PANIC,
- "xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
- bip, bp, orig, x);
+ if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
+ xfs_emerg(bp->b_mount,
+ "%s: bip %x buffer %x orig %x index %d",
+ __func__, bip, bp, orig, x);
+ ASSERT(0);
+ }
}
}
#else
@@ -983,15 +985,14 @@ xfs_buf_iodone_callbacks(
if (XFS_BUF_TARGET(bp) != lasttarg ||
time_after(jiffies, (lasttime + 5*HZ))) {
lasttime = jiffies;
- cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
- " block 0x%llx in %s",
+ xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
- (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
+ (__uint64_t)XFS_BUF_ADDR(bp));
}
lasttarg = XFS_BUF_TARGET(bp);
/*
- * If the write was asynchronous then noone will be looking for the
+ * If the write was asynchronous then no one will be looking for the
* error. Clear the error state and write the buffer out again.
*
* During sync or umount we'll write all pending buffers again
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 1c00bed..6102ac6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1995,13 +1995,12 @@ xfs_da_do_buf(
error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
- (long long)bno);
- cmn_err(CE_ALERT, "dir: inode %lld\n",
+ xfs_alert(mp, "%s: bno %lld dir: inode %lld",
+ __func__, (long long)bno,
(long long)dp->i_ino);
for (i = 0; i < nmap; i++) {
- cmn_err(CE_ALERT,
- "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n",
+ xfs_alert(mp,
+"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
i,
(long long)mapp[i].br_startoff,
(long long)mapp[i].br_startblock,
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index e60490b..be628677 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -270,9 +270,9 @@ xfs_swap_extents(
/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {
- xfs_fs_cmn_err(CE_NOTE, mp,
+ xfs_notice(mp,
"%s: inode 0x%llx format is incompatible for exchanging.",
- __FILE__, ip->i_ino);
+ __func__, ip->i_ino);
goto out_unlock;
}
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index a1321bc..dba7a71 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -159,7 +159,7 @@ xfs_dir_ino_validate(
XFS_AGINO_TO_INO(mp, agno, agino) == ino;
if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
XFS_RANDOM_DIR_INO_VALIDATE))) {
- xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx",
+ xfs_warn(mp, "Invalid inode number 0x%Lx",
(unsigned long long) ino);
XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index f9a0864..a0aab7d 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -899,10 +899,9 @@ xfs_dir2_leafn_rebalance(
if(blk2->index < 0) {
state->inleaf = 1;
blk2->index = 0;
- cmn_err(CE_ALERT,
- "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: "
- "blk1->index %d\n",
- blk1->index);
+ xfs_alert(args->dp->i_mount,
+ "%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n",
+ __func__, blk1->index);
}
}
@@ -1641,26 +1640,22 @@ xfs_dir2_node_addname_int(
}
if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) {
- cmn_err(CE_ALERT,
- "xfs_dir2_node_addname_int: dir ino "
- "%llu needed freesp block %lld for\n"
- " data block %lld, got %lld\n"
- " ifbno %llu lastfbno %d\n",
- (unsigned long long)dp->i_ino,
+ xfs_alert(mp,
+ "%s: dir ino " "%llu needed freesp block %lld for\n"
+ " data block %lld, got %lld ifbno %llu lastfbno %d",
+ __func__, (unsigned long long)dp->i_ino,
(long long)xfs_dir2_db_to_fdb(mp, dbno),
(long long)dbno, (long long)fbno,
(unsigned long long)ifbno, lastfbno);
if (fblk) {
- cmn_err(CE_ALERT,
- " fblk 0x%p blkno %llu "
- "index %d magic 0x%x\n",
+ xfs_alert(mp,
+ " fblk 0x%p blkno %llu index %d magic 0x%x",
fblk,
(unsigned long long)fblk->blkno,
fblk->index,
fblk->magic);
} else {
- cmn_err(CE_ALERT,
- " ... fblk is NULL\n");
+ xfs_alert(mp, " ... fblk is NULL");
}
XFS_ERROR_REPORT("xfs_dir2_node_addname_int",
XFS_ERRLEVEL_LOW, mp);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 4c7db74..39f0633 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -48,7 +48,7 @@ xfs_error_trap(int e)
break;
if (e != xfs_etrap[i])
continue;
- cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
+ xfs_notice(NULL, "%s: error %d", __func__, e);
BUG();
break;
}
@@ -74,7 +74,7 @@ xfs_error_test(int error_tag, int *fsidp, char *expression,
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
- cmn_err(CE_WARN,
+ xfs_warn(NULL,
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
expression, file, line, xfs_etest_fsname[i]);
return 1;
@@ -95,14 +95,14 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
- cmn_err(CE_WARN, "XFS error tag #%d on", error_tag);
+ xfs_warn(mp, "error tag #%d on", error_tag);
return 0;
}
}
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest[i] == 0) {
- cmn_err(CE_WARN, "Turned on XFS error tag #%d",
+ xfs_warn(mp, "Turned on XFS error tag #%d",
error_tag);
xfs_etest[i] = error_tag;
xfs_etest_fsid[i] = fsid;
@@ -114,7 +114,7 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
}
}
- cmn_err(CE_WARN, "error tag overflow, too many turned on");
+ xfs_warn(mp, "error tag overflow, too many turned on");
return 1;
}
@@ -133,7 +133,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) &&
xfs_etest[i] != 0) {
cleared = 1;
- cmn_err(CE_WARN, "Clearing XFS error tag #%d",
+ xfs_warn(mp, "Clearing XFS error tag #%d",
xfs_etest[i]);
xfs_etest[i] = 0;
xfs_etest_fsid[i] = 0LL;
@@ -144,9 +144,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
}
if (loud || cleared)
- cmn_err(CE_WARN,
- "Cleared all XFS error tags for filesystem \"%s\"",
- mp->m_fsname);
+ xfs_warn(mp, "Cleared all XFS error tags for filesystem");
return 0;
}
@@ -162,9 +160,8 @@ xfs_error_report(
inst_t *ra)
{
if (level <= xfs_error_level) {
- xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
- CE_ALERT, mp,
- "XFS internal error %s at line %d of file %s. Caller 0x%p\n",
+ xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
+ "Internal error %s at line %d of file %s. Caller 0x%p\n",
tag, linenum, filename, ra);
xfs_stack_trace();
@@ -184,4 +181,5 @@ xfs_corruption_error(
if (level <= xfs_error_level)
xfs_hex_dump(p, 16);
xfs_error_report(tag, level, mp, filename, linenum, ra);
+ xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 10dce54..079a367 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -145,10 +145,8 @@ extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
#endif /* DEBUG */
/*
- * XFS panic tags -- allow a call to xfs_cmn_err() be turned into
- * a panic by setting xfs_panic_mask in a
- * sysctl. update xfs_max[XFS_PARAM] if
- * more are added.
+ * XFS panic tags -- allow a call to xfs_alert_tag() to be turned into
+ * a panic by setting xfs_panic_mask in a sysctl.
*/
#define XFS_NO_PTAG 0
#define XFS_PTAG_IFLUSH 0x00000001
@@ -160,17 +158,4 @@ extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
#define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
#define XFS_PTAG_FSBLOCK_ZERO 0x00000080
-struct xfs_mount;
-
-extern void xfs_hex_dump(void *p, int length);
-
-#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
- xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
-
-#define xfs_fs_mount_cmn_err(f, fmt, args...) \
- do { \
- if (!(f & XFS_MFSI_QUIET)) \
- cmn_err(CE_WARN, "XFS: " fmt, ## args); \
- } while (0)
-
#endif /* __XFS_ERROR_H__ */
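The panic-tag comment above amounts to a bitmask test at alert time: if the sysctl-armed mask contains the tag, the alert escalates into BUG() for debugging. A sketch with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/bug.h>

	static int demo_panic_mask;	/* hypothetical, set via sysctl */

	static void demo_alert_tag(int panic_tag, const char *msg)
	{
		printk(KERN_ALERT "XFS: %s\n", msg);
		/* an armed tag turns this alert into a crash */
		BUG_ON(demo_panic_mask & panic_tag);
	}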
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 85668ef..9153d2c 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -385,8 +385,8 @@ xfs_growfs_data_private(
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (error) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "error %d reading secondary superblock for ag %d",
+ xfs_warn(mp,
+ "error %d reading secondary superblock for ag %d",
error, agno);
break;
}
@@ -399,7 +399,7 @@ xfs_growfs_data_private(
if (!(error = xfs_bwrite(mp, bp))) {
continue;
} else {
- xfs_fs_cmn_err(CE_WARN, mp,
+ xfs_warn(mp,
"write error %d updating secondary superblock for ag %d",
error, agno);
break; /* no point in continuing */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 0626a32..84ebeec 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -1055,28 +1055,23 @@ xfs_difree(
*/
agno = XFS_INO_TO_AGNO(mp, inode);
if (agno >= mp->m_sb.sb_agcount) {
- cmn_err(CE_WARN,
- "xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s. Returning EINVAL.",
- agno, mp->m_sb.sb_agcount, mp->m_fsname);
+ xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
+ __func__, agno, mp->m_sb.sb_agcount);
ASSERT(0);
return XFS_ERROR(EINVAL);
}
agino = XFS_INO_TO_AGINO(mp, inode);
if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
- cmn_err(CE_WARN,
- "xfs_difree: inode != XFS_AGINO_TO_INO() "
- "(%llu != %llu) on %s. Returning EINVAL.",
- (unsigned long long)inode,
- (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino),
- mp->m_fsname);
+ xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
+ __func__, (unsigned long long)inode,
+ (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
ASSERT(0);
return XFS_ERROR(EINVAL);
}
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
if (agbno >= mp->m_sb.sb_agblocks) {
- cmn_err(CE_WARN,
- "xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s. Returning EINVAL.",
- agbno, mp->m_sb.sb_agblocks, mp->m_fsname);
+ xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
+ __func__, agbno, mp->m_sb.sb_agblocks);
ASSERT(0);
return XFS_ERROR(EINVAL);
}
@@ -1085,9 +1080,8 @@ xfs_difree(
*/
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error) {
- cmn_err(CE_WARN,
- "xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
+ __func__, error);
return error;
}
agi = XFS_BUF_TO_AGI(agbp);
@@ -1106,17 +1100,15 @@ xfs_difree(
* Look for the entry describing this inode.
*/
if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
- cmn_err(CE_WARN,
- "xfs_difree: xfs_inobt_lookup returned() an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
+ __func__, error);
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error) {
- cmn_err(CE_WARN,
- "xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
+ __func__, error);
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
@@ -1157,8 +1149,8 @@ xfs_difree(
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
if ((error = xfs_btree_delete(cur, &i))) {
- cmn_err(CE_WARN, "xfs_difree: xfs_btree_delete returned an error %d on %s.\n",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
+ __func__, error);
goto error0;
}
@@ -1170,9 +1162,8 @@ xfs_difree(
error = xfs_inobt_update(cur, &rec);
if (error) {
- cmn_err(CE_WARN,
- "xfs_difree: xfs_inobt_update returned an error %d on %s.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
+ __func__, error);
goto error0;
}
@@ -1218,10 +1209,9 @@ xfs_imap_lookup(
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_ialloc_read_agi() returned "
- "error %d, agno %d",
- error, agno);
+ xfs_alert(mp,
+ "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
+ __func__, error, agno);
return error;
}
@@ -1299,24 +1289,21 @@ xfs_imap(
if (flags & XFS_IGET_UNTRUSTED)
return XFS_ERROR(EINVAL);
if (agno >= mp->m_sb.sb_agcount) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_imap: agno (%d) >= "
- "mp->m_sb.sb_agcount (%d)",
- agno, mp->m_sb.sb_agcount);
+ xfs_alert(mp,
+ "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
+ __func__, agno, mp->m_sb.sb_agcount);
}
if (agbno >= mp->m_sb.sb_agblocks) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_imap: agbno (0x%llx) >= "
- "mp->m_sb.sb_agblocks (0x%lx)",
- (unsigned long long) agbno,
- (unsigned long) mp->m_sb.sb_agblocks);
+ xfs_alert(mp,
+ "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
+ __func__, (unsigned long long)agbno,
+ (unsigned long)mp->m_sb.sb_agblocks);
}
if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_imap: ino (0x%llx) != "
- "XFS_AGINO_TO_INO(mp, agno, agino) "
- "(0x%llx)",
- ino, XFS_AGINO_TO_INO(mp, agno, agino));
+ xfs_alert(mp,
+ "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
+ __func__, ino,
+ XFS_AGINO_TO_INO(mp, agno, agino));
}
xfs_stack_trace();
#endif /* DEBUG */
@@ -1388,10 +1375,9 @@ out_map:
*/
if ((imap->im_blkno + imap->im_len) >
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
- " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
- (unsigned long long) imap->im_blkno,
+ xfs_alert(mp,
+ "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
+ __func__, (unsigned long long) imap->im_blkno,
(unsigned long long) imap->im_len,
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
return XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index be7cf62..a37480a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -110,8 +110,8 @@ xfs_inobp_check(
dip = (xfs_dinode_t *)xfs_buf_offset(bp,
i * mp->m_sb.sb_inodesize);
if (!dip->di_next_unlinked) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.",
+ xfs_alert(mp,
+ "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
bp);
ASSERT(dip->di_next_unlinked);
}
@@ -142,10 +142,9 @@ xfs_imap_to_bp(
(int)imap->im_len, buf_flags, &bp);
if (error) {
if (error != EAGAIN) {
- cmn_err(CE_WARN,
- "xfs_imap_to_bp: xfs_trans_read_buf()returned "
- "an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp,
+ "%s: xfs_trans_read_buf() returned error %d.",
+ __func__, error);
} else {
ASSERT(buf_flags & XBF_TRYLOCK);
}
@@ -180,12 +179,11 @@ xfs_imap_to_bp(
XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
- cmn_err(CE_PANIC,
- "Device %s - bad inode magic/vsn "
- "daddr %lld #%d (magic=%x)",
- XFS_BUFTARG_NAME(mp->m_ddev_targp),
+ xfs_emerg(mp,
+ "bad inode magic/vsn daddr %lld #%d (magic=%x)",
(unsigned long long)imap->im_blkno, i,
be16_to_cpu(dip->di_magic));
+ ASSERT(0);
#endif
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EFSCORRUPTED);
@@ -317,7 +315,7 @@ xfs_iformat(
if (unlikely(be32_to_cpu(dip->di_nextents) +
be16_to_cpu(dip->di_anextents) >
be64_to_cpu(dip->di_nblocks))) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ xfs_warn(ip->i_mount,
"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
(unsigned long long)ip->i_ino,
(int)(be32_to_cpu(dip->di_nextents) +
@@ -330,8 +328,7 @@ xfs_iformat(
}
if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt dinode %Lu, forkoff = 0x%x.",
+ xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
(unsigned long long)ip->i_ino,
dip->di_forkoff);
XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
@@ -341,7 +338,7 @@ xfs_iformat(
if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
!ip->i_mount->m_rtdev_targp)) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ xfs_warn(ip->i_mount,
"corrupt dinode %Lu, has realtime flag set.",
ip->i_ino);
XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
@@ -373,9 +370,8 @@ xfs_iformat(
* no local regular files yet
*/
if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu "
- "(local format for regular file).",
+ xfs_warn(ip->i_mount,
+ "corrupt inode %Lu (local format for regular file).",
(unsigned long long) ip->i_ino);
XFS_CORRUPTION_ERROR("xfs_iformat(4)",
XFS_ERRLEVEL_LOW,
@@ -385,9 +381,8 @@ xfs_iformat(
di_size = be64_to_cpu(dip->di_size);
if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu "
- "(bad size %Ld for local inode).",
+ xfs_warn(ip->i_mount,
+ "corrupt inode %Lu (bad size %Ld for local inode).",
(unsigned long long) ip->i_ino,
(long long) di_size);
XFS_CORRUPTION_ERROR("xfs_iformat(5)",
@@ -431,9 +426,8 @@ xfs_iformat(
size = be16_to_cpu(atp->hdr.totsize);
if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu "
- "(bad attr fork size %Ld).",
+ xfs_warn(ip->i_mount,
+ "corrupt inode %Lu (bad attr fork size %Ld).",
(unsigned long long) ip->i_ino,
(long long) size);
XFS_CORRUPTION_ERROR("xfs_iformat(8)",
@@ -488,9 +482,8 @@ xfs_iformat_local(
* kmem_alloc() or memcpy() below.
*/
if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu "
- "(bad size %d for local fork, size = %d).",
+ xfs_warn(ip->i_mount,
+ "corrupt inode %Lu (bad size %d for local fork, size = %d).",
(unsigned long long) ip->i_ino, size,
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
@@ -547,8 +540,7 @@ xfs_iformat_extents(
* kmem_alloc() or memcpy() below.
*/
if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu ((a)extents = %d).",
+ xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
(unsigned long long) ip->i_ino, nex);
XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
ip->i_mount, dip);
@@ -623,11 +615,10 @@ xfs_iformat_btree(
|| XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
|| XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu (btree).",
+ xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
(unsigned long long) ip->i_ino);
- XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
- ip->i_mount);
+ XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
+ ip->i_mount, dip);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -813,11 +804,9 @@ xfs_iread(
*/
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
#ifdef DEBUG
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
- "dip->di_magic (0x%x) != "
- "XFS_DINODE_MAGIC (0x%x)",
- be16_to_cpu(dip->di_magic),
- XFS_DINODE_MAGIC);
+ xfs_alert(mp,
+ "%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
+ __func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
#endif /* DEBUG */
error = XFS_ERROR(EINVAL);
goto out_brelse;
@@ -835,9 +824,8 @@ xfs_iread(
error = xfs_iformat(ip, dip);
if (error) {
#ifdef DEBUG
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
- "xfs_iformat() returned error %d",
- error);
+ xfs_alert(mp, "%s: xfs_iformat() returned error %d",
+ __func__, error);
#endif /* DEBUG */
goto out_brelse;
}
@@ -1016,8 +1004,8 @@ xfs_ialloc(
* This is because we're setting fields here we need
* to prevent others from looking at until we're done.
*/
- error = xfs_trans_iget(tp->t_mountp, tp, ino,
- XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+ error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
+ XFS_ILOCK_EXCL, &ip);
if (error)
return error;
ASSERT(ip != NULL);
@@ -1166,6 +1154,7 @@ xfs_ialloc(
/*
* Log the new values stuffed into the inode.
*/
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, flags);
/* now that we have an i_mode we can setup inode ops and unlock */
@@ -1820,9 +1809,8 @@ xfs_iunlink_remove(
*/
error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
- cmn_err(CE_WARN,
- "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
+ __func__, error);
return error;
}
next_agino = be32_to_cpu(dip->di_next_unlinked);
@@ -1867,9 +1855,9 @@ xfs_iunlink_remove(
error = xfs_inotobp(mp, tp, next_ino, &last_dip,
&last_ibp, &last_offset, 0);
if (error) {
- cmn_err(CE_WARN,
- "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp,
+ "%s: xfs_inotobp() returned error %d.",
+ __func__, error);
return error;
}
next_agino = be32_to_cpu(last_dip->di_next_unlinked);
@@ -1882,9 +1870,8 @@ xfs_iunlink_remove(
*/
error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
- cmn_err(CE_WARN,
- "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
- error, mp->m_fsname);
+ xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
+ __func__, error);
return error;
}
next_agino = be32_to_cpu(dip->di_next_unlinked);
@@ -2802,7 +2789,7 @@ xfs_iflush(
/*
* We can't flush the inode until it is unpinned, so wait for it if we
- * are allowed to block. We know noone new can pin it, because we are
+ * are allowed to block. We know no one new can pin it, because we are
* holding the inode lock shared and you need to hold it exclusively to
* pin the inode.
*
@@ -2848,7 +2835,7 @@ xfs_iflush(
* Get the buffer containing the on-disk inode.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &bp,
- (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
+ (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
@@ -2939,16 +2926,16 @@ xfs_iflush_int(
if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
- ip->i_ino, be16_to_cpu(dip->di_magic), dip);
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
+ __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
goto corrupt_out;
}
if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
- ip->i_ino, ip, ip->i_d.di_magic);
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
+ __func__, ip->i_ino, ip, ip->i_d.di_magic);
goto corrupt_out;
}
if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
@@ -2956,9 +2943,9 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
- ip->i_ino, ip);
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: Bad regular inode %Lu, ptr 0x%p",
+ __func__, ip->i_ino, ip);
goto corrupt_out;
}
} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
@@ -2967,28 +2954,28 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
(ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
- ip->i_ino, ip);
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: Bad directory inode %Lu, ptr 0x%p",
+ __func__, ip->i_ino, ip);
goto corrupt_out;
}
}
if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
XFS_RANDOM_IFLUSH_5)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
- ip->i_ino,
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: detected corrupt incore inode %Lu, "
+ "total extents = %d, nblocks = %Ld, ptr 0x%p",
+ __func__, ip->i_ino,
ip->i_d.di_nextents + ip->i_d.di_anextents,
- ip->i_d.di_nblocks,
- ip);
+ ip->i_d.di_nblocks, ip);
goto corrupt_out;
}
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
- xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
- "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
- ip->i_ino, ip->i_d.di_forkoff, ip);
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
+ __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
goto corrupt_out;
}
/*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 5c95fa8..ff4e2a3 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -111,7 +111,7 @@ struct xfs_imap {
* Generally, we do not want to hold the i_rlock while holding the
* i_ilock. Hierarchy is i_iolock followed by i_rlock.
*
- * xfs_iptr_t contains all the inode fields upto and including the
+ * xfs_iptr_t contains all the inode fields up to and including the
* i_mnext and i_mprev fields, it is used as a marker in the inode
* chain off the mount structure by xfs_sync calls.
*/
@@ -336,7 +336,7 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
/*
* Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
* to retain compatibility with "old" filesystems).
*/
static inline prid_t
@@ -409,28 +409,35 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
/*
* Flags for lockdep annotations.
*
- * XFS_I[O]LOCK_PARENT - for operations that require locking two inodes
- * (ie directory operations that require locking a directory inode and
- * an entry inode). The first inode gets locked with this flag so it
- * gets a lockdep subclass of 1 and the second lock will have a lockdep
- * subclass of 0.
+ * XFS_LOCK_PARENT - for directory operations that require locking a
+ * parent directory inode and a child entry inode. The parent gets locked
+ * with this flag so it gets a lockdep subclass of 1 and the child entry
+ * lock will have a lockdep subclass of 0.
+ *
+ * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
+ * inodes do not participate in the normal lock order, and thus have their
+ * own subclasses.
*
 * XFS_LOCK_INUMORDER - for locking several inodes at the same time
* with xfs_lock_inodes(). This flag is used as the starting subclass
* and each subsequent lock acquired will increment the subclass by one.
- * So the first lock acquired will have a lockdep subclass of 2, the
- * second lock will have a lockdep subclass of 3, and so on. It is
+ * So the first lock acquired will have a lockdep subclass of 4, the
+ * second lock will have a lockdep subclass of 5, and so on. It is
* the responsibility of the class builder to shift this to the correct
* portion of the lock_mode lockdep mask.
*/
#define XFS_LOCK_PARENT 1
-#define XFS_LOCK_INUMORDER 2
+#define XFS_LOCK_RTBITMAP 2
+#define XFS_LOCK_RTSUM 3
+#define XFS_LOCK_INUMORDER 4
#define XFS_IOLOCK_SHIFT 16
#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
#define XFS_ILOCK_SHIFT 24
#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
#define XFS_IOLOCK_DEP_MASK 0x00ff0000
#define XFS_ILOCK_DEP_MASK 0xff000000
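A quick sketch of how the new subclass constants combine once shifted (derived from the defines above; illustration only, not additional patch content):

/*
 * With XFS_ILOCK_SHIFT == 24, each subclass lands in the top byte of
 * the lock_mode word, i.e. the XFS_ILOCK_DEP_MASK (0xff000000) range:
 *
 *   XFS_ILOCK_PARENT   == 1 << 24
 *   XFS_ILOCK_RTBITMAP == 2 << 24
 *   XFS_ILOCK_RTSUM    == 3 << 24
 *
 * xfs_lock_inodes() starts at XFS_LOCK_INUMORDER (4) and bumps the
 * subclass once per inode locked: 4 << 24, 5 << 24, and so on.
 */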
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index fd4f398..576fdfe 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -198,6 +198,41 @@ xfs_inode_item_size(
}
/*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extent as we place them into the on-disk inode. In this case, we
+ * need to do this conversion before we write the extents into the log. Because
+ * we don't have the disk inode to write into here, we allocate a buffer and
+ * format the extents into it via xfs_iextents_copy(). We free the buffer in
+ * the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+ struct xfs_inode *ip,
+ struct xfs_log_iovec *vecp,
+ int whichfork,
+ int type)
+{
+ xfs_bmbt_rec_t *ext_buffer;
+
+ ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+ if (whichfork == XFS_DATA_FORK)
+ ip->i_itemp->ili_extents_buf = ext_buffer;
+ else
+ ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+ vecp->i_addr = ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+ vecp->i_type = type;
+}
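For orientation, the two call sites introduced later in this patch exercise the helper as follows (condensed from the hunks below, not new code):

	xfs_inode_item_format_extents(ip, vecp,
			XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
	...
	xfs_inode_item_format_extents(ip, vecp,
			XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);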
+
+/*
* This is called to fill in the vector of log iovecs for the
* given inode log item. It fills the first item with an inode
* log format structure, the second with the on-disk inode structure,
@@ -213,7 +248,6 @@ xfs_inode_item_format(
struct xfs_inode *ip = iip->ili_inode;
uint nvecs;
size_t data_bytes;
- xfs_bmbt_rec_t *ext_buffer;
xfs_mount_t *mp;
vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
} else
#endif
{
- /*
- * There are delayed allocation extents
- * in the inode, or we need to convert
- * the extents to on disk format.
- * Use xfs_iextents_copy()
- * to copy only the real extents into
- * a separate buffer. We'll free the
- * buffer in the unlock routine.
- */
- ext_buffer = kmem_alloc(ip->i_df.if_bytes,
- KM_SLEEP);
- iip->ili_extents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_DATA_FORK);
- vecp->i_type = XLOG_REG_TYPE_IEXT;
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
*/
vecp->i_addr = ip->i_afp->if_u1.if_extents;
vecp->i_len = ip->i_afp->if_bytes;
+ vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
ASSERT(iip->ili_aextents_buf == NULL);
- /*
- * Need to endian flip before logging
- */
- ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
- KM_SLEEP);
- iip->ili_aextents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_ATTR_FORK);
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
- vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
@@ -760,11 +773,11 @@ xfs_inode_item_push(
* Push the inode to it's backing buffer. This will not remove the
* inode from the AIL - a further push will be required to trigger a
* buffer push. However, this allows all the dirty inodes to be pushed
- * to the buffer before it is pushed to disk. THe buffer IO completion
- * will pull th einode from the AIL, mark it clean and unlock the flush
+ * to the buffer before it is pushed to disk. The buffer IO completion
+ * will pull the inode from the AIL, mark it clean and unlock the flush
* lock.
*/
- (void) xfs_iflush(ip, 0);
+ (void) xfs_iflush(ip, SYNC_TRYLOCK);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
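The SYNC_TRYLOCK passed here pairs with the xfs_iflush() hunk earlier in this patch, which maps it onto a non-blocking buffer lock (repeated from above for context):

	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
			  (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);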
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 8a0f044..091d82b 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -101,11 +101,11 @@ xfs_iomap_eof_align_last_fsb(
}
STATIC int
-xfs_cmn_err_fsblock_zero(
+xfs_alert_fsblock_zero(
xfs_inode_t *ip,
xfs_bmbt_irec_t *imap)
{
- xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
+ xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
"Access to block zero in inode %llu "
"start_block: %llx start_off: %llx "
"blkcnt: %llx extent-state: %x\n",
@@ -246,7 +246,7 @@ xfs_iomap_write_direct(
}
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) {
- error = xfs_cmn_err_fsblock_zero(ip, imap);
+ error = xfs_alert_fsblock_zero(ip, imap);
goto error_out;
}
@@ -464,7 +464,7 @@ retry:
}
if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
+ return xfs_alert_fsblock_zero(ip, &imap[0]);
*ret_imap = imap[0];
return 0;
@@ -614,7 +614,7 @@ xfs_iomap_write_allocate(
* covers at least part of the callers request
*/
if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_cmn_err_fsblock_zero(ip, imap);
+ return xfs_alert_fsblock_zero(ip, imap);
if ((offset_fsb >= imap->br_startoff) &&
(offset_fsb < (imap->br_startoff +
@@ -724,7 +724,7 @@ xfs_iomap_write_unwritten(
return XFS_ERROR(error);
if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_cmn_err_fsblock_zero(ip, &imap);
+ return xfs_alert_fsblock_zero(ip, &imap);
if ((numblks_fsb = imap.br_blockcount) == 0) {
/*
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dc1882a..751e94f 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -204,7 +204,6 @@ xfs_bulkstat(
xfs_agi_t *agi; /* agi header data */
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
- xfs_daddr_t bno; /* inode cluster start daddr */
int chunkidx; /* current index into inode chunk */
int clustidx; /* current index into inode cluster */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
mp->m_sb.sb_inopblog);
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
/*
* Skip if this inode is free.
*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index ae6fef1f..b612ce4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -374,11 +374,10 @@ xfs_log_mount(
int error;
if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
- cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
+ xfs_notice(mp, "Mounting Filesystem");
else {
- cmn_err(CE_NOTE,
- "Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.",
- mp->m_fsname);
+ xfs_notice(mp,
+"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
}
@@ -393,7 +392,7 @@ xfs_log_mount(
*/
error = xfs_trans_ail_init(mp);
if (error) {
- cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);
+ xfs_warn(mp, "AIL initialisation failed: error %d", error);
goto out_free_log;
}
mp->m_log->l_ailp = mp->m_ail;
@@ -413,7 +412,8 @@ xfs_log_mount(
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
if (error) {
- cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
+ xfs_warn(mp, "log mount/recovery failed: error %d",
+ error);
goto out_destroy_ail;
}
}
@@ -542,10 +542,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
*/
}
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_log_unmount: unmount record failed");
- }
+ if (error)
+ xfs_alert(mp, "%s: unmount record failed", __func__);
spin_lock(&log->l_icloglock);
@@ -763,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
break;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
- if (!xfs_trans_ail_tail(log->l_ailp) &&
+ if (!xfs_ail_min_lsn(log->l_ailp) &&
xlog_iclogs_empty(log)) {
if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -803,7 +801,7 @@ xlog_assign_tail_lsn(
xfs_lsn_t tail_lsn;
struct log *log = mp->m_log;
- tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+ tail_lsn = xfs_ail_min_lsn(mp->m_ail);
if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
@@ -852,7 +850,7 @@ xlog_space_left(
* In this case we just want to return the size of the
* log as the amount of space left.
*/
- xfs_fs_cmn_err(CE_ALERT, log->l_mp,
+ xfs_alert(log->l_mp,
"xlog_space_left: head behind tail\n"
" tail_cycle = %d, tail_bytes = %d\n"
" GH cycle = %d, GH bytes = %d",
@@ -1001,7 +999,7 @@ xlog_alloc_log(xfs_mount_t *mp,
log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
if (!log) {
- xlog_warn("XFS: Log allocation failed: No memory!");
+ xfs_warn(mp, "Log allocation failed: No memory!");
goto out;
}
@@ -1029,24 +1027,24 @@ xlog_alloc_log(xfs_mount_t *mp,
if (xfs_sb_version_hassector(&mp->m_sb)) {
log2_size = mp->m_sb.sb_logsectlog;
if (log2_size < BBSHIFT) {
- xlog_warn("XFS: Log sector size too small "
- "(0x%x < 0x%x)", log2_size, BBSHIFT);
+ xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
+ log2_size, BBSHIFT);
goto out_free_log;
}
log2_size -= BBSHIFT;
if (log2_size > mp->m_sectbb_log) {
- xlog_warn("XFS: Log sector size too large "
- "(0x%x > 0x%x)", log2_size, mp->m_sectbb_log);
+ xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
+ log2_size, mp->m_sectbb_log);
goto out_free_log;
}
/* for larger sector sizes, must have v2 or external log */
if (log2_size && log->l_logBBstart > 0 &&
!xfs_sb_version_haslogv2(&mp->m_sb)) {
-
- xlog_warn("XFS: log sector size (0x%x) invalid "
- "for configuration.", log2_size);
+ xfs_warn(mp,
+ "log sector size (0x%x) invalid for configuration.",
+ log2_size);
goto out_free_log;
}
}
@@ -1241,7 +1239,7 @@ xlog_grant_push_ail(
* the filesystem is shutting down.
*/
if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+ xfs_ail_push(log->l_ailp, threshold_lsn);
}
/*
@@ -1563,38 +1561,36 @@ xlog_print_tic_res(
"SWAPEXT"
};
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_log_write: reservation summary:\n"
- " trans type = %s (%u)\n"
- " unit res = %d bytes\n"
- " current res = %d bytes\n"
- " total reg = %u bytes (o/flow = %u bytes)\n"
- " ophdrs = %u (ophdr space = %u bytes)\n"
- " ophdr + reg = %u bytes\n"
- " num regions = %u\n",
- ((ticket->t_trans_type <= 0 ||
- ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
- "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
- ticket->t_trans_type,
- ticket->t_unit_res,
- ticket->t_curr_res,
- ticket->t_res_arr_sum, ticket->t_res_o_flow,
- ticket->t_res_num_ophdrs, ophdr_spc,
- ticket->t_res_arr_sum +
- ticket->t_res_o_flow + ophdr_spc,
- ticket->t_res_num);
+ xfs_warn(mp,
+ "xfs_log_write: reservation summary:\n"
+ " trans type = %s (%u)\n"
+ " unit res = %d bytes\n"
+ " current res = %d bytes\n"
+ " total reg = %u bytes (o/flow = %u bytes)\n"
+ " ophdrs = %u (ophdr space = %u bytes)\n"
+ " ophdr + reg = %u bytes\n"
+ " num regions = %u\n",
+ ((ticket->t_trans_type <= 0 ||
+ ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
+ "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
+ ticket->t_trans_type,
+ ticket->t_unit_res,
+ ticket->t_curr_res,
+ ticket->t_res_arr_sum, ticket->t_res_o_flow,
+ ticket->t_res_num_ophdrs, ophdr_spc,
+ ticket->t_res_arr_sum +
+ ticket->t_res_o_flow + ophdr_spc,
+ ticket->t_res_num);
for (i = 0; i < ticket->t_res_num; i++) {
- uint r_type = ticket->t_res_arr[i].r_type;
- cmn_err(CE_WARN,
- "region[%u]: %s - %u bytes\n",
- i,
+ uint r_type = ticket->t_res_arr[i].r_type;
+ xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
"bad-rtype" : res_type_str[r_type-1]),
ticket->t_res_arr[i].r_len);
}
- xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
+ xfs_alert_tag(mp, XFS_PTAG_LOGRES,
"xfs_log_write: reservation ran out. Need to up reservation");
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
@@ -1682,7 +1678,7 @@ xlog_write_setup_ophdr(
case XFS_LOG:
break;
default:
- xfs_fs_cmn_err(CE_WARN, log->l_mp,
+ xfs_warn(log->l_mp,
"Bad XFS transaction clientid 0x%x in ticket 0x%p",
ophdr->oh_clientid, ticket);
return NULL;
@@ -2264,7 +2260,7 @@ xlog_state_do_callback(
if (repeats > 5000) {
flushcnt += repeats;
repeats = 0;
- xfs_fs_cmn_err(CE_WARN, log->l_mp,
+ xfs_warn(log->l_mp,
"%s: possible infinite loop (%d iterations)",
__func__, flushcnt);
}
@@ -3052,10 +3048,8 @@ xfs_log_force(
int error;
error = _xfs_log_force(mp, flags, NULL);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
- "error %d returned.", error);
- }
+ if (error)
+ xfs_warn(mp, "%s: error %d returned.", __func__, error);
}
/*
@@ -3204,10 +3198,8 @@ xfs_log_force_lsn(
int error;
error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
- "error %d returned.", error);
- }
+ if (error)
+ xfs_warn(mp, "%s: error %d returned.", __func__, error);
}
/*
@@ -3412,9 +3404,20 @@ xlog_verify_dest_ptr(
}
if (!good_ptr)
- xlog_panic("xlog_verify_dest_ptr: invalid ptr");
+ xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
}
+/*
+ * Check to make sure the grant write head didn't just overlap the tail. If
+ * the cycles are the same, we can't be overlapping. Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so it can give false positives. Rather than assert
+ * on failures, use a warn-once flag and a panic tag to allow the admin to
+ * determine if they want to panic the machine when such an error occurs. For
+ * debug kernels this will have the same effect as using an assert but, unlike
+ * an assert, it can be turned off at runtime.
+ */
STATIC void
xlog_verify_grant_tail(
struct log *log)
@@ -3422,17 +3425,22 @@ xlog_verify_grant_tail(
int tail_cycle, tail_blocks;
int cycle, space;
- /*
- * Check to make sure the grant write head didn't just over lap the
- * tail. If the cycles are the same, we can't be overlapping.
- * Otherwise, make sure that the cycles differ by exactly one and
- * check the byte count.
- */
xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
if (tail_cycle != cycle) {
- ASSERT(cycle - 1 == tail_cycle);
- ASSERT(space <= BBTOB(tail_blocks));
+ if (cycle - 1 != tail_cycle &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: cycle - 1 != tail_cycle", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
+
+ if (space > BBTOB(tail_blocks) &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: space > BBTOB(tail_blocks)", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
}
}
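The warn-once idiom above generalises as follows; this is only a sketch of the pattern, with bad_condition standing in for either racy check (the unlocked read means duplicate reports are merely suppressed, not strictly prevented):

	if (bad_condition && !(log->l_flags & XLOG_TAIL_WARN)) {
		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, "...");
		log->l_flags |= XLOG_TAIL_WARN;	/* suppress repeats */
	}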
@@ -3448,16 +3456,16 @@ xlog_verify_tail_lsn(xlog_t *log,
blocks =
log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
- xlog_panic("xlog_verify_tail_lsn: ran out of log space");
+ xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
} else {
ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
- xlog_panic("xlog_verify_tail_lsn: tail wrapped");
+ xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
if (blocks < BTOBB(iclog->ic_offset) + 1)
- xlog_panic("xlog_verify_tail_lsn: ran out of log space");
+ xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
}
} /* xlog_verify_tail_lsn */
@@ -3497,22 +3505,23 @@ xlog_verify_iclog(xlog_t *log,
icptr = log->l_iclog;
for (i=0; i < log->l_iclog_bufs; i++) {
if (icptr == NULL)
- xlog_panic("xlog_verify_iclog: invalid ptr");
+ xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
icptr = icptr->ic_next;
}
if (icptr != log->l_iclog)
- xlog_panic("xlog_verify_iclog: corrupt iclog ring");
+ xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
spin_unlock(&log->l_icloglock);
/* check log magic numbers */
if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
- xlog_panic("xlog_verify_iclog: invalid magic num");
+ xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
ptr = (xfs_caddr_t) &iclog->ic_header;
for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
ptr += BBSIZE) {
if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
- xlog_panic("xlog_verify_iclog: unexpected magic num");
+ xfs_emerg(log->l_mp, "%s: unexpected magic num",
+ __func__);
}
/* check fields */
@@ -3542,9 +3551,10 @@ xlog_verify_iclog(xlog_t *log,
}
}
if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
- cmn_err(CE_WARN, "xlog_verify_iclog: "
- "invalid clientid %d op 0x%p offset 0x%lx",
- clientid, ophead, (unsigned long)field_offset);
+ xfs_warn(log->l_mp,
+ "%s: invalid clientid %d op 0x%p offset 0x%lx",
+ __func__, clientid, ophead,
+ (unsigned long)field_offset);
/* check length */
field_offset = (__psint_t)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index d5f8be8..5864850 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -87,10 +87,6 @@ static inline uint xlog_get_client_id(__be32 i)
return be32_to_cpu(i) >> 24;
}
-#define xlog_panic(args...) cmn_err(CE_PANIC, ## args)
-#define xlog_exit(args...) cmn_err(CE_PANIC, ## args)
-#define xlog_warn(args...) cmn_err(CE_WARN, ## args)
-
/*
* In core log state
*/
@@ -148,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */
#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being
shutdown */
+#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */
#ifdef __KERNEL__
/*
@@ -574,7 +571,7 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
* When we crack an atomic LSN, we sample it first so that the value will not
* change while we are cracking it into the component values. This means we
* will always get consistent component values to work from. This should always
- * be used to smaple and crack LSNs taht are stored and updated in atomic
+ * be used to sample and crack LSNs that are stored and updated in atomic
* variables.
*/
static inline void
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index aa0ebb7..5cc464a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -92,7 +92,7 @@ xlog_get_bp(
int nbblks)
{
if (!xlog_buf_bbcount_valid(log, nbblks)) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
@@ -101,7 +101,7 @@ xlog_get_bp(
/*
* We do log I/O in units of log sectors (a power-of-2
* multiple of the basic block size), so we round up the
- * requested size to acommodate the basic blocks required
+ * requested size to accommodate the basic blocks required
* for complete log sectors.
*
* In addition, the buffer may be used for a non-sector-
@@ -112,7 +112,7 @@ xlog_get_bp(
* an issue. Nor will this be a problem if the log I/O is
* done in basic blocks (sector size 1). But otherwise we
* extend the buffer by one extra log sector to ensure
- * there's space to accomodate this possiblility.
+ * there's space to accommodate this possibility.
*/
if (nbblks > 1 && log->l_sectBBsize > 1)
nbblks += log->l_sectBBsize;
@@ -160,7 +160,7 @@ xlog_bread_noalign(
int error;
if (!xlog_buf_bbcount_valid(log, nbblks)) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
@@ -219,7 +219,7 @@ xlog_bwrite(
int error;
if (!xlog_buf_bbcount_valid(log, nbblks)) {
- xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
+ xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return EFSCORRUPTED;
@@ -254,9 +254,9 @@ xlog_header_check_dump(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n",
+ xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
- cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n",
+ xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
@@ -279,15 +279,15 @@ xlog_header_check_recover(
* a dirty log created in IRIX.
*/
if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
- xlog_warn(
- "XFS: dirty log written in incompatible format - can't recover");
+ xfs_warn(mp,
+ "dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_recover(1)",
XFS_ERRLEVEL_HIGH, mp);
return XFS_ERROR(EFSCORRUPTED);
} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
- xlog_warn(
- "XFS: dirty log entry has mismatched uuid - can't recover");
+ xfs_warn(mp,
+ "dirty log entry has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_recover(2)",
XFS_ERRLEVEL_HIGH, mp);
@@ -312,9 +312,9 @@ xlog_header_check_mount(
* h_fs_uuid is nil, we assume this log was last mounted
* by IRIX and continue.
*/
- xlog_warn("XFS: nil uuid in log - IRIX style log");
+ xfs_warn(mp, "nil uuid in log - IRIX style log");
} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
- xlog_warn("XFS: log has mismatched uuid - can't recover");
+ xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
XFS_ERROR_REPORT("xlog_header_check_mount",
XFS_ERRLEVEL_HIGH, mp);
@@ -490,8 +490,8 @@ xlog_find_verify_log_record(
for (i = (*last_blk) - 1; i >= 0; i--) {
if (i < start_blk) {
/* valid log record not found */
- xlog_warn(
- "XFS: Log inconsistent (didn't find previous header)");
+ xfs_warn(log->l_mp,
+ "Log inconsistent (didn't find previous header)");
ASSERT(0);
error = XFS_ERROR(EIO);
goto out;
@@ -591,12 +591,12 @@ xlog_find_head(
* mkfs etc write a dummy unmount record to a fresh
* log so we can store the uuid in there
*/
- xlog_warn("XFS: totally zeroed log");
+ xfs_warn(log->l_mp, "totally zeroed log");
}
return 0;
} else if (error) {
- xlog_warn("XFS: empty log check failed");
+ xfs_warn(log->l_mp, "empty log check failed");
return error;
}
@@ -819,7 +819,7 @@ validate_head:
xlog_put_bp(bp);
if (error)
- xlog_warn("XFS: failed to find log head");
+ xfs_warn(log->l_mp, "failed to find log head");
return error;
}
@@ -912,7 +912,7 @@ xlog_find_tail(
}
}
if (!found) {
- xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
+ xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
@@ -1028,7 +1028,7 @@ done:
xlog_put_bp(bp);
if (error)
- xlog_warn("XFS: failed to locate log tail");
+ xfs_warn(log->l_mp, "failed to locate log tail");
return error;
}
@@ -1092,7 +1092,8 @@ xlog_find_zeroed(
* the first block must be 1. If it's not, maybe we're
* not looking at a log... Bail out.
*/
- xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
+ xfs_warn(log->l_mp,
+ "Log inconsistent or not a log (last==0, first!=1)");
return XFS_ERROR(EINVAL);
}
@@ -1506,8 +1507,8 @@ xlog_recover_add_to_trans(
if (list_empty(&trans->r_itemq)) {
/* we need to catch log corruptions here */
if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
- xlog_warn("XFS: xlog_recover_add_to_trans: "
- "bad header magic number");
+ xfs_warn(log->l_mp, "%s: bad header magic number",
+ __func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
@@ -1534,8 +1535,8 @@ xlog_recover_add_to_trans(
if (item->ri_total == 0) { /* first region to be added */
if (in_f->ilf_size == 0 ||
in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
- xlog_warn(
- "XFS: bad number of regions (%d) in inode log format",
+ xfs_warn(log->l_mp,
+ "bad number of regions (%d) in inode log format",
in_f->ilf_size);
ASSERT(0);
return XFS_ERROR(EIO);
@@ -1592,8 +1593,9 @@ xlog_recover_reorder_trans(
list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
- xlog_warn(
- "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
+ xfs_warn(log->l_mp,
+ "%s: unrecognized type of log operation",
+ __func__);
ASSERT(0);
return XFS_ERROR(EIO);
}
@@ -1803,8 +1805,9 @@ xlog_recover_do_inode_buffer(
logged_nextp = item->ri_buf[item_index].i_addr +
next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
+ xfs_alert(mp,
+ "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
+ "Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
XFS_ERRLEVEL_LOW, mp);
@@ -1863,17 +1866,17 @@ xlog_recover_do_reg_buffer(
if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
if (item->ri_buf[i].i_addr == NULL) {
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"XFS: NULL dquot in %s.", __func__);
goto next;
}
if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[i].i_len, __func__);
goto next;
}
- error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
+ error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
if (error)
@@ -1898,6 +1901,7 @@ xlog_recover_do_reg_buffer(
*/
int
xfs_qm_dqcheck(
+ struct xfs_mount *mp,
xfs_disk_dquot_t *ddq,
xfs_dqid_t id,
uint type, /* used only when IO_dorepair is true */
@@ -1924,14 +1928,14 @@ xfs_qm_dqcheck(
*/
if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
errs++;
}
if (ddq->d_version != XFS_DQUOT_VERSION) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
str, id, ddq->d_version, XFS_DQUOT_VERSION);
errs++;
@@ -1941,7 +1945,7 @@ xfs_qm_dqcheck(
ddq->d_flags != XFS_DQ_PROJ &&
ddq->d_flags != XFS_DQ_GROUP) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
str, id, ddq->d_flags);
errs++;
@@ -1949,7 +1953,7 @@ xfs_qm_dqcheck(
if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
+ xfs_alert(mp,
"%s : ondisk-dquot 0x%p, ID mismatch: "
"0x%x expected, found id 0x%x",
str, ddq, id, be32_to_cpu(ddq->d_id));
@@ -1962,9 +1966,8 @@ xfs_qm_dqcheck(
be64_to_cpu(ddq->d_blk_softlimit)) {
if (!ddq->d_btimer) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
- "%s : Dquot ID 0x%x (0x%p) "
- "BLK TIMER NOT STARTED",
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
@@ -1974,9 +1977,8 @@ xfs_qm_dqcheck(
be64_to_cpu(ddq->d_ino_softlimit)) {
if (!ddq->d_itimer) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
- "%s : Dquot ID 0x%x (0x%p) "
- "INODE TIMER NOT STARTED",
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
@@ -1986,9 +1988,8 @@ xfs_qm_dqcheck(
be64_to_cpu(ddq->d_rtb_softlimit)) {
if (!ddq->d_rtbtimer) {
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_ALERT,
- "%s : Dquot ID 0x%x (0x%p) "
- "RTBLK TIMER NOT STARTED",
+ xfs_alert(mp,
+ "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
str, (int)be32_to_cpu(ddq->d_id), ddq);
errs++;
}
@@ -1999,7 +2000,7 @@ xfs_qm_dqcheck(
return errs;
if (flags & XFS_QMOPT_DOWARN)
- cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
+ xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
/*
* Typically, a repair is only requested by quotacheck.
@@ -2218,9 +2219,9 @@ xlog_recover_inode_pass2(
*/
if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
- dip, bp, in_f->ilf_ino);
+ xfs_alert(mp,
+ "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
+ __func__, dip, bp, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
@@ -2229,9 +2230,9 @@ xlog_recover_inode_pass2(
dicp = item->ri_buf[1].i_addr;
if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
- item, in_f->ilf_ino);
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
+ __func__, item, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
XFS_ERRLEVEL_LOW, mp);
error = EFSCORRUPTED;
@@ -2263,9 +2264,10 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
- item, dip, bp, in_f->ilf_ino);
+ xfs_alert(mp,
+ "%s: Bad regular inode log record, rec ptr 0x%p, "
+ "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
@@ -2276,9 +2278,10 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
- item, dip, bp, in_f->ilf_ino);
+ xfs_alert(mp,
+ "%s: Bad dir inode log record, rec ptr 0x%p, "
+ "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino);
error = EFSCORRUPTED;
goto error;
}
@@ -2287,9 +2290,10 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
- item, dip, bp, in_f->ilf_ino,
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
+ "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino,
dicp->di_nextents + dicp->di_anextents,
dicp->di_nblocks);
error = EFSCORRUPTED;
@@ -2299,8 +2303,9 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
+ "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
error = EFSCORRUPTED;
goto error;
@@ -2309,9 +2314,9 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, dicp);
xfs_buf_relse(bp);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
- item->ri_buf[1].i_len, item);
+ xfs_alert(mp,
+ "%s: Bad inode log record length %d, rec ptr 0x%p",
+ __func__, item->ri_buf[1].i_len, item);
error = EFSCORRUPTED;
goto error;
}
@@ -2398,7 +2403,7 @@ xlog_recover_inode_pass2(
break;
default:
- xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
+ xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
ASSERT(0);
xfs_buf_relse(bp);
error = EIO;
@@ -2467,13 +2472,11 @@ xlog_recover_dquot_pass2(
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
- cmn_err(CE_ALERT,
- "XFS: NULL dquot in %s.", __func__);
+ xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
return XFS_ERROR(EIO);
}
if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
- cmn_err(CE_ALERT,
- "XFS: dquot too small (%d) in %s.",
+ xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
return XFS_ERROR(EIO);
}
@@ -2498,12 +2501,10 @@ xlog_recover_dquot_pass2(
*/
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
- if ((error = xfs_qm_dqcheck(recddq,
- dq_f->qlf_id,
- 0, XFS_QMOPT_DOWARN,
- "xlog_recover_dquot_pass2 (log copy)"))) {
+ error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+ "xlog_recover_dquot_pass2 (log copy)");
+ if (error)
return XFS_ERROR(EIO);
- }
ASSERT(dq_f->qlf_len == 1);
error = xfs_read_buf(mp, mp->m_ddev_targp,
@@ -2523,8 +2524,9 @@ xlog_recover_dquot_pass2(
* was among a chunk of dquots created earlier, and we did some
* minimal initialization then.
*/
- if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
- "xlog_recover_dquot_pass2")) {
+ error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+ "xlog_recover_dquot_pass2");
+ if (error) {
xfs_buf_relse(bp);
return XFS_ERROR(EIO);
}
@@ -2676,9 +2678,8 @@ xlog_recover_commit_pass1(
/* nothing to do in pass 1 */
return 0;
default:
- xlog_warn(
- "XFS: invalid item type (%d) xlog_recover_commit_pass1",
- ITEM_TYPE(item));
+ xfs_warn(log->l_mp, "%s: invalid item type (%d)",
+ __func__, ITEM_TYPE(item));
ASSERT(0);
return XFS_ERROR(EIO);
}
@@ -2707,9 +2708,8 @@ xlog_recover_commit_pass2(
/* nothing to do in pass2 */
return 0;
default:
- xlog_warn(
- "XFS: invalid item type (%d) xlog_recover_commit_pass2",
- ITEM_TYPE(item));
+ xfs_warn(log->l_mp, "%s: invalid item type (%d)",
+ __func__, ITEM_TYPE(item));
ASSERT(0);
return XFS_ERROR(EIO);
}
@@ -2751,10 +2751,11 @@ xlog_recover_commit_trans(
STATIC int
xlog_recover_unmount_trans(
+ struct log *log,
xlog_recover_t *trans)
{
/* Do nothing now */
- xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
+ xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
return 0;
}
@@ -2797,8 +2798,8 @@ xlog_recover_process_data(
dp += sizeof(xlog_op_header_t);
if (ohead->oh_clientid != XFS_TRANSACTION &&
ohead->oh_clientid != XFS_LOG) {
- xlog_warn(
- "XFS: xlog_recover_process_data: bad clientid");
+ xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
+ __func__, ohead->oh_clientid);
ASSERT(0);
return (XFS_ERROR(EIO));
}
@@ -2811,8 +2812,8 @@ xlog_recover_process_data(
be64_to_cpu(rhead->h_lsn));
} else {
if (dp + be32_to_cpu(ohead->oh_len) > lp) {
- xlog_warn(
- "XFS: xlog_recover_process_data: bad length");
+ xfs_warn(log->l_mp, "%s: bad length 0x%x",
+ __func__, be32_to_cpu(ohead->oh_len));
WARN_ON(1);
return (XFS_ERROR(EIO));
}
@@ -2825,7 +2826,7 @@ xlog_recover_process_data(
trans, pass);
break;
case XLOG_UNMOUNT_TRANS:
- error = xlog_recover_unmount_trans(trans);
+ error = xlog_recover_unmount_trans(log, trans);
break;
case XLOG_WAS_CONT_TRANS:
error = xlog_recover_add_to_cont_trans(log,
@@ -2833,8 +2834,8 @@ xlog_recover_process_data(
be32_to_cpu(ohead->oh_len));
break;
case XLOG_START_TRANS:
- xlog_warn(
- "XFS: xlog_recover_process_data: bad transaction");
+ xfs_warn(log->l_mp, "%s: bad transaction",
+ __func__);
ASSERT(0);
error = XFS_ERROR(EIO);
break;
@@ -2844,8 +2845,8 @@ xlog_recover_process_data(
dp, be32_to_cpu(ohead->oh_len));
break;
default:
- xlog_warn(
- "XFS: xlog_recover_process_data: bad flag");
+ xfs_warn(log->l_mp, "%s: bad flag 0x%x",
+ __func__, flags);
ASSERT(0);
error = XFS_ERROR(EIO);
break;
@@ -3030,8 +3031,7 @@ xlog_recover_clear_agi_bucket(
out_abort:
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
- xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
- "failed to clear agi %d. Continuing.", agno);
+ xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
return;
}
@@ -3282,7 +3282,7 @@ xlog_valid_rec_header(
if (unlikely(
(!rhead->h_version ||
(be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
- xlog_warn("XFS: %s: unrecognised log version (%d).",
+ xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
__func__, be32_to_cpu(rhead->h_version));
return XFS_ERROR(EIO);
}
@@ -3740,10 +3740,9 @@ xlog_recover(
return error;
}
- cmn_err(CE_NOTE,
- "Starting XFS recovery on filesystem: %s (logdev: %s)",
- log->l_mp->m_fsname, log->l_mp->m_logname ?
- log->l_mp->m_logname : "internal");
+ xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
+ log->l_mp->m_logname ? log->l_mp->m_logname
+ : "internal");
error = xlog_do_recover(log, head_blk, tail_blk);
log->l_flags |= XLOG_RECOVERY_NEEDED;
@@ -3776,9 +3775,7 @@ xlog_recover_finish(
int error;
error = xlog_recover_process_efis(log);
if (error) {
- cmn_err(CE_ALERT,
- "Failed to recover EFIs on filesystem: %s",
- log->l_mp->m_fsname);
+ xfs_alert(log->l_mp, "Failed to recover EFIs");
return error;
}
/*
@@ -3793,15 +3790,12 @@ xlog_recover_finish(
xlog_recover_check_summary(log);
- cmn_err(CE_NOTE,
- "Ending XFS recovery on filesystem: %s (logdev: %s)",
- log->l_mp->m_fsname, log->l_mp->m_logname ?
- log->l_mp->m_logname : "internal");
+ xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
+ log->l_mp->m_logname ? log->l_mp->m_logname
+ : "internal");
log->l_flags &= ~XLOG_RECOVERY_NEEDED;
} else {
- cmn_err(CE_DEBUG,
- "Ending clean XFS mount for filesystem: %s\n",
- log->l_mp->m_fsname);
+ xfs_info(log->l_mp, "Ending clean mount");
}
return 0;
}
@@ -3834,10 +3828,8 @@ xlog_recover_check_summary(
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xlog_recover_check_summary(agf)"
- "agf read failed agno %d error %d",
- agno, error);
+ xfs_alert(mp, "%s agf read failed agno %d error %d",
+ __func__, agno, error);
} else {
agfp = XFS_BUF_TO_AGF(agfbp);
freeblks += be32_to_cpu(agfp->agf_freeblks) +
@@ -3846,7 +3838,10 @@ xlog_recover_check_summary(
}
error = xfs_read_agi(mp, NULL, agno, &agibp);
- if (!error) {
+ if (error) {
+ xfs_alert(mp, "%s agi read failed agno %d error %d",
+ __func__, agno, error);
+ } else {
struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
itotal += be32_to_cpu(agi->agi_count);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d447aef..bb3f9a7 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -133,9 +133,7 @@ xfs_uuid_mount(
return 0;
if (uuid_is_nil(uuid)) {
- cmn_err(CE_WARN,
- "XFS: Filesystem %s has nil UUID - can't mount",
- mp->m_fsname);
+ xfs_warn(mp, "Filesystem has nil UUID - can't mount");
return XFS_ERROR(EINVAL);
}
@@ -163,8 +161,7 @@ xfs_uuid_mount(
out_duplicate:
mutex_unlock(&xfs_uuid_table_mutex);
- cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
- mp->m_fsname);
+ xfs_warn(mp, "Filesystem has duplicate UUID - can't mount");
return XFS_ERROR(EINVAL);
}
@@ -311,6 +308,8 @@ xfs_mount_validate_sb(
xfs_sb_t *sbp,
int flags)
{
+ int loud = !(flags & XFS_MFSI_QUIET);
+
/*
* If the log device and data device have the
* same device number, the log is internal.
@@ -319,28 +318,32 @@ xfs_mount_validate_sb(
* a volume filesystem in a non-volume manner.
*/
if (sbp->sb_magicnum != XFS_SB_MAGIC) {
- xfs_fs_mount_cmn_err(flags, "bad magic number");
+ if (loud)
+ xfs_warn(mp, "bad magic number");
return XFS_ERROR(EWRONGFS);
}
if (!xfs_sb_good_version(sbp)) {
- xfs_fs_mount_cmn_err(flags, "bad version");
+ if (loud)
+ xfs_warn(mp, "bad version");
return XFS_ERROR(EWRONGFS);
}
if (unlikely(
sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
- xfs_fs_mount_cmn_err(flags,
- "filesystem is marked as having an external log; "
- "specify logdev on the\nmount command line.");
+ if (loud)
+ xfs_warn(mp,
+ "filesystem is marked as having an external log; "
+ "specify logdev on the mount command line.");
return XFS_ERROR(EINVAL);
}
if (unlikely(
sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
- xfs_fs_mount_cmn_err(flags,
- "filesystem is marked as having an internal log; "
- "do not specify logdev on\nthe mount command line.");
+ if (loud)
+ xfs_warn(mp,
+ "filesystem is marked as having an internal log; "
+ "do not specify logdev on the mount command line.");
return XFS_ERROR(EINVAL);
}
@@ -369,7 +372,8 @@ xfs_mount_validate_sb(
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
(sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
(sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
- xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
+ if (loud)
+ xfs_warn(mp, "SB sanity check 1 failed");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -382,7 +386,8 @@ xfs_mount_validate_sb(
(xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
- xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
+ if (loud)
+ xfs_warn(mp, "SB sanity check 2 failed");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -390,12 +395,12 @@ xfs_mount_validate_sb(
* Until this is fixed only page-sized or smaller data blocks work.
*/
if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
- xfs_fs_mount_cmn_err(flags,
- "file system with blocksize %d bytes",
- sbp->sb_blocksize);
- xfs_fs_mount_cmn_err(flags,
- "only pagesize (%ld) or less will currently work.",
- PAGE_SIZE);
+ if (loud) {
+ xfs_warn(mp,
+ "File system with blocksize %d bytes. "
+ "Only pagesize (%ld) or less will currently work.",
+ sbp->sb_blocksize, PAGE_SIZE);
+ }
return XFS_ERROR(ENOSYS);
}
@@ -409,21 +414,23 @@ xfs_mount_validate_sb(
case 2048:
break;
default:
- xfs_fs_mount_cmn_err(flags,
- "inode size of %d bytes not supported",
- sbp->sb_inodesize);
+ if (loud)
+ xfs_warn(mp, "inode size of %d bytes not supported",
+ sbp->sb_inodesize);
return XFS_ERROR(ENOSYS);
}
if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
- xfs_fs_mount_cmn_err(flags,
- "file system too large to be mounted on this system.");
+ if (loud)
+ xfs_warn(mp,
+ "file system too large to be mounted on this system.");
return XFS_ERROR(EFBIG);
}
if (unlikely(sbp->sb_inprogress)) {
- xfs_fs_mount_cmn_err(flags, "file system busy");
+ if (loud)
+ xfs_warn(mp, "file system busy");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -431,8 +438,9 @@ xfs_mount_validate_sb(
* Version 1 directory format has never worked on Linux.
*/
if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
- xfs_fs_mount_cmn_err(flags,
- "file system using version 1 directory format");
+ if (loud)
+ xfs_warn(mp,
+ "file system using version 1 directory format");
return XFS_ERROR(ENOSYS);
}
@@ -673,6 +681,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
unsigned int sector_size;
xfs_buf_t *bp;
int error;
+ int loud = !(flags & XFS_MFSI_QUIET);
ASSERT(mp->m_sb_bp == NULL);
ASSERT(mp->m_ddev_targp != NULL);
@@ -688,7 +697,8 @@ reread:
bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
XFS_SB_DADDR, sector_size, 0);
if (!bp) {
- xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
+ if (loud)
+ xfs_warn(mp, "SB buffer read failed");
return EIO;
}
@@ -699,7 +709,8 @@ reread:
xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
if (error) {
- xfs_fs_mount_cmn_err(flags, "SB validate failed");
+ if (loud)
+ xfs_warn(mp, "SB validate failed");
goto release_buf;
}
@@ -707,9 +718,9 @@ reread:
* We must be able to do sector-sized and sector-aligned IO.
*/
if (sector_size > mp->m_sb.sb_sectsize) {
- xfs_fs_mount_cmn_err(flags,
- "device supports only %u byte sectors (not %u)",
- sector_size, mp->m_sb.sb_sectsize);
+ if (loud)
+ xfs_warn(mp, "device supports %u byte sectors (not %u)",
+ sector_size, mp->m_sb.sb_sectsize);
error = ENOSYS;
goto release_buf;
}
@@ -853,8 +864,7 @@ xfs_update_alignment(xfs_mount_t *mp)
if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
(BBTOB(mp->m_swidth) & mp->m_blockmask)) {
if (mp->m_flags & XFS_MOUNT_RETERR) {
- cmn_err(CE_WARN,
- "XFS: alignment check 1 failed");
+ xfs_warn(mp, "alignment check 1 failed");
return XFS_ERROR(EINVAL);
}
mp->m_dalign = mp->m_swidth = 0;
@@ -867,8 +877,9 @@ xfs_update_alignment(xfs_mount_t *mp)
if (mp->m_flags & XFS_MOUNT_RETERR) {
return XFS_ERROR(EINVAL);
}
- xfs_fs_cmn_err(CE_WARN, mp,
-"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
+ xfs_warn(mp,
+ "stripe alignment turned off: sunit(%d)/swidth(%d) "
+ "incompatible with agsize(%d)",
mp->m_dalign, mp->m_swidth,
sbp->sb_agblocks);
@@ -878,9 +889,9 @@ xfs_update_alignment(xfs_mount_t *mp)
mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
} else {
if (mp->m_flags & XFS_MOUNT_RETERR) {
- xfs_fs_cmn_err(CE_WARN, mp,
-"stripe alignment turned off: sunit(%d) less than bsize(%d)",
- mp->m_dalign,
+ xfs_warn(mp,
+ "stripe alignment turned off: sunit(%d) less than bsize(%d)",
+ mp->m_dalign,
mp->m_blockmask +1);
return XFS_ERROR(EINVAL);
}
@@ -1026,14 +1037,14 @@ xfs_check_sizes(xfs_mount_t *mp)
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
- cmn_err(CE_WARN, "XFS: filesystem size mismatch detected");
+ xfs_warn(mp, "filesystem size mismatch detected");
return XFS_ERROR(EFBIG);
}
bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
if (!bp) {
- cmn_err(CE_WARN, "XFS: last sector read failed");
+ xfs_warn(mp, "last sector read failed");
return EIO;
}
xfs_buf_relse(bp);
@@ -1041,14 +1052,14 @@ xfs_check_sizes(xfs_mount_t *mp)
if (mp->m_logdev_targp != mp->m_ddev_targp) {
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
- cmn_err(CE_WARN, "XFS: log size mismatch detected");
+ xfs_warn(mp, "log size mismatch detected");
return XFS_ERROR(EFBIG);
}
bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
XFS_FSB_TO_B(mp, 1), 0);
if (!bp) {
- cmn_err(CE_WARN, "XFS: log device read failed");
+ xfs_warn(mp, "log device read failed");
return EIO;
}
xfs_buf_relse(bp);
@@ -1086,7 +1097,7 @@ xfs_mount_reset_sbqflags(
return 0;
#ifdef QUOTADEBUG
- xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
+ xfs_notice(mp, "Writing superblock quota changes");
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
@@ -1094,8 +1105,7 @@ xfs_mount_reset_sbqflags(
XFS_DEFAULT_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
- xfs_fs_cmn_err(CE_ALERT, mp,
- "xfs_mount_reset_sbqflags: Superblock update failed!");
+ xfs_alert(mp, "%s: Superblock update failed!", __func__);
return error;
}
@@ -1161,8 +1171,7 @@ xfs_mountfs(
* transaction subsystem is online.
*/
if (xfs_sb_has_mismatched_features2(sbp)) {
- cmn_err(CE_WARN,
- "XFS: correcting sb_features alignment problem");
+ xfs_warn(mp, "correcting sb_features alignment problem");
sbp->sb_features2 |= sbp->sb_bad_features2;
sbp->sb_bad_features2 = sbp->sb_features2;
mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
@@ -1241,7 +1250,7 @@ xfs_mountfs(
*/
error = xfs_rtmount_init(mp);
if (error) {
- cmn_err(CE_WARN, "XFS: RT mount failed");
+ xfs_warn(mp, "RT mount failed");
goto out_remove_uuid;
}
@@ -1272,12 +1281,12 @@ xfs_mountfs(
INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
if (error) {
- cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
+ xfs_warn(mp, "Failed per-ag init: %d", error);
goto out_remove_uuid;
}
if (!sbp->sb_logblocks) {
- cmn_err(CE_WARN, "XFS: no log defined");
+ xfs_warn(mp, "no log defined");
XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto out_free_perag;
@@ -1290,7 +1299,7 @@ xfs_mountfs(
XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
if (error) {
- cmn_err(CE_WARN, "XFS: log mount failed");
+ xfs_warn(mp, "log mount failed");
goto out_free_perag;
}
@@ -1327,16 +1336,14 @@ xfs_mountfs(
*/
error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
if (error) {
- cmn_err(CE_WARN, "XFS: failed to read root inode");
+ xfs_warn(mp, "failed to read root inode");
goto out_log_dealloc;
}
ASSERT(rip != NULL);
if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
- cmn_err(CE_WARN, "XFS: corrupted root inode");
- cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
- XFS_BUFTARG_NAME(mp->m_ddev_targp),
+ xfs_warn(mp, "corrupted root inode %llu: not a directory",
(unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
@@ -1356,7 +1363,7 @@ xfs_mountfs(
/*
* Free up the root inode.
*/
- cmn_err(CE_WARN, "XFS: failed to read RT inodes");
+ xfs_warn(mp, "failed to read RT inodes");
goto out_rele_rip;
}
@@ -1368,7 +1375,7 @@ xfs_mountfs(
if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
error = xfs_mount_log_sb(mp, mp->m_update_flags);
if (error) {
- cmn_err(CE_WARN, "XFS: failed to write sb changes");
+ xfs_warn(mp, "failed to write sb changes");
goto out_rtunmount;
}
}
@@ -1389,10 +1396,7 @@ xfs_mountfs(
* quotachecked license.
*/
if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
- cmn_err(CE_NOTE,
- "XFS: resetting qflags for filesystem %s",
- mp->m_fsname);
-
+ xfs_notice(mp, "resetting quota flags");
error = xfs_mount_reset_sbqflags(mp);
if (error)
return error;
@@ -1406,7 +1410,7 @@ xfs_mountfs(
*/
error = xfs_log_mount_finish(mp);
if (error) {
- cmn_err(CE_WARN, "XFS: log mount finish failed");
+ xfs_warn(mp, "log mount finish failed");
goto out_rtunmount;
}
@@ -1435,8 +1439,8 @@ xfs_mountfs(
resblks = xfs_default_resblks(mp);
error = xfs_reserve_blocks(mp, &resblks, NULL);
if (error)
- cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
- "blocks. Continuing without a reserve pool.");
+ xfs_warn(mp,
+ "Unable to allocate reserve blocks. Continuing without reserve pool.");
}
return 0;
@@ -1525,12 +1529,12 @@ xfs_unmountfs(
resblks = 0;
error = xfs_reserve_blocks(mp, &resblks, NULL);
if (error)
- cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. "
+ xfs_warn(mp, "Unable to free reserved block pool. "
"Freespace may not be correct on next mount.");
error = xfs_log_sbcount(mp, 1);
if (error)
- cmn_err(CE_WARN, "XFS: Unable to update superblock counters. "
+ xfs_warn(mp, "Unable to update superblock counters. "
"Freespace may not be correct on next mount.");
xfs_unmountfs_writesb(mp);
xfs_unmountfs_wait(mp); /* wait for async bufs */
@@ -2013,10 +2017,8 @@ xfs_dev_is_read_only(
if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
xfs_readonly_buftarg(mp->m_logdev_targp) ||
(mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
- cmn_err(CE_NOTE,
- "XFS: %s required on read-only device.", message);
- cmn_err(CE_NOTE,
- "XFS: write access unavailable, cannot proceed.");
+ xfs_notice(mp, "%s required on read-only device.", message);
+ xfs_notice(mp, "write access unavailable, cannot proceed.");
return EROFS;
}
return 0;
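
The hunks above replace the old Irix-derived cmn_err()/xfs_fs_cmn_err() calls with the mount-aware xfs_warn()/xfs_notice()/xfs_alert() helpers, which take the xfs_mount and prefix each message with the filesystem name automatically (note the removed "XFS: " literals). A minimal sketch of what such a helper boils down to, assuming the kernel's %pV va_format mechanism; the real definitions live in the XFS message code, not in this hunk:

	/* Sketch only: mount-aware message prefixing, not the in-tree code. */
	static void
	__xfs_printk_sketch(
		const char		*level,	/* e.g. KERN_WARNING */
		const struct xfs_mount	*mp,
		struct va_format	*vaf)
	{
		if (mp && mp->m_fsname)
			printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
		else
			printk("%sXFS: %pV\n", level, vaf);
	}
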
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a62e897..19af0ab 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
struct mutex m_icsb_mutex; /* balancer sync lock */
#endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
- struct task_struct *m_sync_task; /* generalised sync thread */
- xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
- struct list_head m_sync_list; /* sync thread work item list */
- spinlock_t m_sync_lock; /* work item list lock */
- int m_sync_seq; /* sync thread generation no. */
- wait_queue_head_t m_wait_single_sync_task;
+ struct delayed_work m_sync_work; /* background sync work */
+ struct delayed_work m_reclaim_work; /* background inode reclaim */
+ struct work_struct m_flush_work; /* background inode flush */
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
struct shrinker m_inode_shrink; /* inode reclaim shrinker */
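
The xfs_mount change above drops the dedicated sync-thread machinery in favour of work items. A hedged sketch of how a self-requeueing delayed work such as m_sync_work is typically driven; the handler name, the use of system_wq, and the 30s period are illustrative assumptions, not taken from this patch:

	#include <linux/workqueue.h>

	/* Illustrative handler; the real one lives in the XFS sync code. */
	static void
	example_sync_worker(
		struct work_struct	*work)
	{
		struct xfs_mount *mp = container_of(to_delayed_work(work),
						    struct xfs_mount, m_sync_work);

		/* ... write back dirty metadata for mp here ... */

		/* requeue ourselves; the period is an assumption */
		queue_delayed_work(system_wq, &mp->m_sync_work,
				   msecs_to_jiffies(30 * 1000));
	}

	static void
	example_start_sync_worker(
		struct xfs_mount	*mp)
	{
		INIT_DELAYED_WORK(&mp->m_sync_work, example_sync_worker);
		queue_delayed_work(system_wq, &mp->m_sync_work, 0);
	}
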
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 9bb6eda..a595f29 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -382,7 +382,8 @@ static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
-extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
+extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
+ xfs_dqid_t, uint, uint, char *);
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
#endif /* __KERNEL__ */
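
The xfs_qm_dqcheck() prototype gains a struct xfs_mount * as its first argument; the natural reading is that its internal diagnostics move to the mount-aware message helpers, which need mp. A hypothetical call-site shape under that assumption (the flag and tag values below are placeholders, not from this patch):

	error = xfs_qm_dqcheck(mp, ddq, id, type, flags, "example");
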
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 12a1913..8f76fdf 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -76,7 +76,7 @@ xfs_growfs_rt_alloc(
xfs_mount_t *mp, /* file system mount point */
xfs_extlen_t oblocks, /* old count of blocks */
xfs_extlen_t nblocks, /* new count of blocks */
- xfs_ino_t ino) /* inode number (bitmap/summary) */
+ xfs_inode_t *ip) /* inode (bitmap/summary) */
{
xfs_fileoff_t bno; /* block number in file */
xfs_buf_t *bp; /* temporary buffer for zeroing */
@@ -86,7 +86,6 @@ xfs_growfs_rt_alloc(
xfs_fsblock_t firstblock; /* first block allocated in xaction */
xfs_bmap_free_t flist; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
- xfs_inode_t *ip; /* pointer to incore inode */
xfs_bmbt_irec_t map; /* block map output */
int nmap; /* number of block maps */
int resblks; /* space reservation */
@@ -112,9 +111,9 @@ xfs_growfs_rt_alloc(
/*
* Lock the inode.
*/
- if ((error = xfs_trans_iget(mp, tp, ino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
+
xfs_bmap_init(&flist, &firstblock);
/*
* Allocate blocks to the bitmap file.
@@ -155,9 +154,8 @@ xfs_growfs_rt_alloc(
/*
* Lock the bitmap inode.
*/
- if ((error = xfs_trans_iget(mp, tp, ino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
/*
* Get a buffer for the block.
*/
@@ -1854,7 +1852,6 @@ xfs_growfs_rt(
xfs_rtblock_t bmbno; /* bitmap block number */
xfs_buf_t *bp; /* temporary buffer */
int error; /* error return value */
- xfs_inode_t *ip; /* bitmap inode, used as lock */
xfs_mount_t *nmp; /* new (fake) mount structure */
xfs_drfsbno_t nrblocks; /* new number of realtime blocks */
xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */
@@ -1918,11 +1915,11 @@ xfs_growfs_rt(
/*
* Allocate space to the bitmap and summary files, as necessary.
*/
- if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks,
- mp->m_sb.sb_rbmino)))
+ error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip);
+ if (error)
return error;
- if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
- mp->m_sb.sb_rsumino)))
+ error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip);
+ if (error)
return error;
/*
* Allocate a new (fake) mount/sb.
@@ -1972,10 +1969,8 @@ xfs_growfs_rt(
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
- ASSERT(ip == mp->m_rbmip);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
/*
* Update the bitmap inode's size.
*/
@@ -1986,10 +1981,8 @@ xfs_growfs_rt(
/*
* Get the summary inode into the transaction.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
- XFS_ILOCK_EXCL, &ip)))
- goto error_cancel;
- ASSERT(ip == mp->m_rsumip);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
* Update the summary inode's size.
*/
@@ -2075,15 +2068,15 @@ xfs_rtallocate_extent(
xfs_extlen_t prod, /* extent product factor */
xfs_rtblock_t *rtblock) /* out: start block allocated */
{
+ xfs_mount_t *mp = tp->t_mountp;
int error; /* error value */
- xfs_inode_t *ip; /* inode for bitmap file */
- xfs_mount_t *mp; /* file system mount structure */
xfs_rtblock_t r; /* result allocated block */
xfs_fsblock_t sb; /* summary file block number */
xfs_buf_t *sumbp; /* summary file block buffer */
+ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
ASSERT(minlen > 0 && minlen <= maxlen);
- mp = tp->t_mountp;
+
/*
* If prod is set then figure out what to do to minlen and maxlen.
*/
@@ -2099,12 +2092,7 @@ xfs_rtallocate_extent(
return 0;
}
}
- /*
- * Lock out other callers by grabbing the bitmap inode lock.
- */
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
+
sumbp = NULL;
/*
* Allocate by size, or near another block, or exactly at some block.
@@ -2123,11 +2111,12 @@ xfs_rtallocate_extent(
len, &sumbp, &sb, prod, &r);
break;
default:
+ error = EIO;
ASSERT(0);
}
- if (error) {
+ if (error)
return error;
- }
+
/*
* If it worked, update the superblock.
*/
@@ -2155,7 +2144,6 @@ xfs_rtfree_extent(
xfs_extlen_t len) /* length of extent freed */
{
int error; /* error value */
- xfs_inode_t *ip; /* bitmap file inode */
xfs_mount_t *mp; /* file system mount structure */
xfs_fsblock_t sb; /* summary file block number */
xfs_buf_t *sumbp; /* summary file block buffer */
@@ -2164,9 +2152,9 @@ xfs_rtfree_extent(
/*
* Synchronize by locking the bitmap inode.
*/
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
#if defined(__KERNEL__) && defined(DEBUG)
/*
* Check to see that this whole range is currently allocated.
@@ -2199,10 +2187,10 @@ xfs_rtfree_extent(
*/
if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
mp->m_sb.sb_rextents) {
- if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
- ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
- *(__uint64_t *)&ip->i_d.di_atime = 0;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+ mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+ *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
}
return 0;
}
@@ -2222,8 +2210,8 @@ xfs_rtmount_init(
if (sbp->sb_rblocks == 0)
return 0;
if (mp->m_rtdev_targp == NULL) {
- cmn_err(CE_WARN,
- "XFS: This filesystem has a realtime volume, use rtdev=device option");
+ xfs_warn(mp,
+ "Filesystem has a realtime volume, use rtdev=device option");
return XFS_ERROR(ENODEV);
}
mp->m_rsumlevels = sbp->sb_rextslog + 1;
@@ -2237,7 +2225,7 @@ xfs_rtmount_init(
*/
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) {
- cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu",
+ xfs_warn(mp, "realtime mount -- %llu != %llu",
(unsigned long long) XFS_BB_TO_FSB(mp, d),
(unsigned long long) mp->m_sb.sb_rblocks);
return XFS_ERROR(EFBIG);
@@ -2246,7 +2234,7 @@ xfs_rtmount_init(
d - XFS_FSB_TO_BB(mp, 1),
XFS_FSB_TO_B(mp, 1), 0);
if (!bp) {
- cmn_err(CE_WARN, "XFS: realtime device size check failed");
+ xfs_warn(mp, "realtime device size check failed");
return EIO;
}
xfs_buf_relse(bp);
@@ -2306,20 +2294,16 @@ xfs_rtpick_extent(
xfs_rtblock_t *pick) /* result rt extent */
{
xfs_rtblock_t b; /* result block */
- int error; /* error return value */
- xfs_inode_t *ip; /* bitmap incore inode */
int log2; /* log of sequence number */
__uint64_t resid; /* residual after log removed */
__uint64_t seq; /* sequence number of file creation */
__uint64_t *seqp; /* pointer to seqno in inode */
- if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
- XFS_ILOCK_EXCL, &ip)))
- return error;
- ASSERT(ip == mp->m_rbmip);
- seqp = (__uint64_t *)&ip->i_d.di_atime;
- if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
- ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+ ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+ seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime;
+ if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
+ mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
*seqp = 0;
}
seq = *seqp;
@@ -2335,7 +2319,7 @@ xfs_rtpick_extent(
b = mp->m_sb.sb_rextents - len;
}
*seqp = seq + 1;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
*pick = b;
return 0;
}
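
Throughout xfs_rtalloc.c the lookup-per-transaction idiom xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, ...) is replaced by locking the realtime inodes already cached in the mount structure (mp->m_rbmip, mp->m_rsumip) and joining them to the transaction with a reference, which also removes the error paths. A minimal sketch of the replacement pattern using only the interfaces visible in these hunks; the wrapper name is hypothetical:

	/* Hypothetical wrapper illustrating the lock-and-join idiom above. */
	static void
	example_join_rt_bitmap(
		struct xfs_trans	*tp,
		struct xfs_mount	*mp)
	{
		/* serialise rt allocators on the bitmap inode lock ... */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
		/* ... and hand lock plus reference to the transaction */
		xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	}
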
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index ff614c2..09e1f4f 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -154,7 +154,7 @@ xfs_rtmount_init(
if (mp->m_sb.sb_rblocks == 0)
return 0;
- cmn_err(CE_WARN, "XFS: Not built with CONFIG_XFS_RT");
+ xfs_warn(mp, "Not built with CONFIG_XFS_RT");
return ENOSYS;
}
# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 56861d5..d6d6fdf 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -49,9 +49,9 @@ xfs_do_force_shutdown(
logerror = flags & SHUTDOWN_LOG_IO_ERROR;
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- cmn_err(CE_NOTE, "xfs_force_shutdown(%s,0x%x) called from "
- "line %d of file %s. Return address = 0x%p",
- mp->m_fsname, flags, lnnum, fname, __return_address);
+ xfs_notice(mp,
+ "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
+ __func__, flags, lnnum, fname, __return_address);
}
/*
* No need to duplicate efforts.
@@ -69,30 +69,25 @@ xfs_do_force_shutdown(
return;
if (flags & SHUTDOWN_CORRUPT_INCORE) {
- xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp,
- "Corruption of in-memory data detected. Shutting down filesystem: %s",
- mp->m_fsname);
- if (XFS_ERRLEVEL_HIGH <= xfs_error_level) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
+ "Corruption of in-memory data detected. Shutting down filesystem");
+ if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
- }
} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
if (logerror) {
- xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp,
- "Log I/O Error Detected. Shutting down filesystem: %s",
- mp->m_fsname);
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+ "Log I/O Error Detected. Shutting down filesystem");
} else if (flags & SHUTDOWN_DEVICE_REQ) {
- xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp,
- "All device paths lost. Shutting down filesystem: %s",
- mp->m_fsname);
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "All device paths lost. Shutting down filesystem");
} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
- xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp,
- "I/O Error Detected. Shutting down filesystem: %s",
- mp->m_fsname);
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "I/O Error Detected. Shutting down filesystem");
}
}
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- cmn_err(CE_ALERT, "Please umount the filesystem, "
- "and rectify the problem(s)");
+ xfs_alert(mp,
+ "Please umount the filesystem and rectify the problem(s)");
}
}
@@ -106,10 +101,9 @@ xfs_ioerror_alert(
xfs_buf_t *bp,
xfs_daddr_t blkno)
{
- cmn_err(CE_ALERT,
- "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
- " (\"%s\") error %d buf count %zd",
- (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
+ xfs_alert(mp,
+ "I/O error occurred: meta-data dev %s block 0x%llx"
+ " (\"%s\") error %d buf count %zd",
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
(__uint64_t)blkno, func,
XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
@@ -173,17 +167,9 @@ xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
- xfs_extlen_t extsz;
-
- if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize
- : ip->i_mount->m_sb.sb_rextsize;
- ASSERT(extsz);
- } else {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize : 0;
- }
-
- return extsz;
+ if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
+ return ip->i_d.di_extsize;
+ if (XFS_IS_REALTIME_INODE(ip))
+ return ip->i_mount->m_sb.sb_rextsize;
+ return 0;
}
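
The rewritten xfs_get_extsz_hint() also adjusts precedence: an explicit, non-zero per-inode extent size now always wins, and the realtime fallback applies only when no usable hint is set. Worked outcomes under that reading of the new code:

	/*
	 * Illustrative outcomes of the logic above:
	 *   EXTSIZE set, di_extsize = 16          -> 16 (explicit hint wins)
	 *   realtime inode, EXTSIZE not set       -> sb_rextsize
	 *   EXTSIZE set but di_extsize = 0, rt    -> sb_rextsize
	 *   EXTSIZE set but di_extsize = 0, !rt   -> 0
	 *   neither                               -> 0 (no hint)
	 */
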
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c2042b7..06a9759 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -469,8 +469,6 @@ void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
-int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
- xfs_ino_t , uint, uint, struct xfs_inode **);
void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c5bbbc4..acdb92f 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,74 +28,138 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
+struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
#ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_log_item_t *prev_lip;
+
+ if (list_empty(&ailp->xa_ail))
+ return;
+
+ /*
+ * Check the next and previous entries are valid.
+ */
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+ prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+
+#ifdef XFS_TRANS_DEBUG
+ /*
+ * Walk the list checking lsn ordering, and that every entry has the
+ * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+ * when specifically debugging the transaction subsystem.
+ */
+ prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+ list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = lip;
+ }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
+/*
+ * Return a pointer to the first item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+/*

+ * Return a pointer to the last item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL. If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ if (lip->li_ail.next == &ailp->xa_ail)
+ return NULL;
+
+ return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
/*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log. This is exactly the LSN of the first
- * item in the AIL. If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log. This is exactly the LSN of the first item in the AIL. If the AIL
+ * is empty, then this function returns 0.
*
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
*/
xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
xfs_log_item_t *lip;
spin_lock(&ailp->xa_lock);
lip = xfs_ail_min(ailp);
- if (lip == NULL) {
- lsn = (xfs_lsn_t)0;
- } else {
+ if (lip)
lsn = lip->li_lsn;
- }
spin_unlock(&ailp->xa_lock);
return lsn;
}
/*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward. It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
*/
-void
-xfs_trans_ail_push(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+ struct xfs_ail *ailp)
{
- xfs_log_item_t *lip;
+ xfs_lsn_t lsn = 0;
+ xfs_log_item_t *lip;
- lip = xfs_ail_min(ailp);
- if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
- xfsaild_wakeup(ailp, threshold_lsn);
- }
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_ail_max(ailp);
+ if (lip)
+ lsn = lip->li_lsn;
+ spin_unlock(&ailp->xa_lock);
+
+ return lsn;
}
/*
@@ -236,16 +300,57 @@ out:
}
/*
- * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * splice the log item list into the AIL at the given LSN.
*/
-long
-xfsaild_push(
- struct xfs_ail *ailp,
- xfs_lsn_t *last_lsn)
+static void
+xfs_ail_splice(
+ struct xfs_ail *ailp,
+ struct list_head *list,
+ xfs_lsn_t lsn)
{
- long tout = 0;
- xfs_lsn_t last_pushed_lsn = *last_lsn;
+ xfs_log_item_t *next_lip;
+
+ /* If the list is empty, just insert the item. */
+ if (list_empty(&ailp->xa_ail)) {
+ list_splice(list, &ailp->xa_ail);
+ return;
+ }
+
+ list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+ if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+ break;
+ }
+
+ ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+ XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+ list_splice_init(list, &next_lip->li_ail);
+}
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_ail_check(ailp, lip);
+ list_del(&lip->li_ail);
+ xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+ struct work_struct *work)
+{
+ struct xfs_ail *ailp = container_of(to_delayed_work(work),
+ struct xfs_ail, xa_work);
+ long tout;
xfs_lsn_t target = ailp->xa_target;
xfs_lsn_t lsn;
xfs_log_item_t *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
spin_lock(&ailp->xa_lock);
xfs_trans_ail_cursor_init(ailp, cur);
- lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+ lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
/*
* AIL is empty or our push has reached the end.
*/
xfs_trans_ail_cursor_done(ailp, cur);
spin_unlock(&ailp->xa_lock);
- *last_lsn = 0;
- return tout;
+ ailp->xa_last_pushed_lsn = 0;
+ return;
}
XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(xs_push_ail_success);
IOP_PUSH(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
push_xfsbufd = 1;
break;
@@ -319,7 +424,7 @@ xfsaild_push(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@@ -374,9 +479,23 @@ xfsaild_push(
wake_up_process(mp->m_ddev_targp->bt_task);
}
+ /* assume we have more work to do in a short while */
+ tout = 10;
if (!count) {
/* We're past our target or empty, so idle */
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
+
+ /*
+ * Check for an updated push target before clearing the
+ * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+ * work to do. Wait a bit longer before starting that work.
+ */
+ smp_rmb();
+ if (ailp->xa_target == target) {
+ clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+ return;
+ }
+ tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
* We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
* start the next scan from the start of the AIL.
*/
tout = 50;
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
} else if ((stuck * 100) / count > 90) {
/*
* Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
* continuing from where we were.
*/
tout = 20;
- } else {
- /* more to do, but wait a short while before continuing */
- tout = 10;
}
- *last_lsn = last_pushed_lsn;
- return tout;
+
+ /* There is more to do, requeue us. */
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+ msecs_to_jiffies(tout));
+}
+
+/*
+ * This routine is called to move the tail of the AIL forward. It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only queue
+ * work if we set the pushing bit appropriately.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+ struct xfs_ail *ailp,
+ xfs_lsn_t threshold_lsn)
+{
+ xfs_log_item_t *lip;
+
+ lip = xfs_ail_min(ailp);
+ if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+ XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+ return;
+
+ /*
+ * Ensure that the new target is noticed in push code before it clears
+ * the XFS_AIL_PUSHING_BIT.
+ */
+ smp_wmb();
+ ailp->xa_target = threshold_lsn;
+ if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+ struct xfs_ail *ailp)
+{
+ xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
+
+ if (threshold_lsn)
+ xfs_ail_push(ailp, threshold_lsn);
+}
/*
* This is to be called when an item is unlocked that may have
@@ -563,7 +729,7 @@ xfs_trans_ail_delete_bulk(
spin_unlock(&ailp->xa_lock);
if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+ xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
"%s: attempting to delete a log item that is not in the AIL",
__func__);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
xfs_mount_t *mp)
{
struct xfs_ail *ailp;
- int error;
ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
ailp->xa_mount = mp;
INIT_LIST_HEAD(&ailp->xa_ail);
spin_lock_init(&ailp->xa_lock);
- error = xfsaild_start(ailp);
- if (error)
- goto out_free_ailp;
+ INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
mp->m_ail = ailp;
return 0;
-
-out_free_ailp:
- kmem_free(ailp);
- return error;
}
void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
- xfsaild_stop(ailp);
+ cancel_delayed_work_sync(&ailp->xa_work);
kmem_free(ailp);
}
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
- struct xfs_ail *ailp,
- struct list_head *list,
- xfs_lsn_t lsn)
-{
- xfs_log_item_t *next_lip;
-
- /*
- * If the list is empty, just insert the item.
- */
- if (list_empty(&ailp->xa_ail)) {
- list_splice(list, &ailp->xa_ail);
- return;
- }
-
- list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
- if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
- break;
- }
-
- ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
- (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
- list_splice_init(list, &next_lip->li_ail);
- return;
-}
-
-/*
- * Delete the given item from the AIL. Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_ail_check(ailp, lip);
- list_del(&lip->li_ail);
- xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
- struct xfs_ail *ailp)
-{
- if (list_empty(&ailp->xa_ail))
- return NULL;
-
- return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL. If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- if (lip->li_ail.next == &ailp->xa_ail)
- return NULL;
-
- return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_log_item_t *prev_lip;
-
- if (list_empty(&ailp->xa_ail))
- return;
-
- /*
- * Check the next and previous entries are valid.
- */
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
- prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
- /*
- * Walk the list checking lsn ordering, and that every entry has the
- * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
- * when specifically debugging the transaction subsystem.
- */
- prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
- list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = lip;
- }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
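
The core of the xfs_trans_ail.c change is replacing the xfsaild thread with a self-requeueing delayed work item gated by XFS_AIL_PUSHING_BIT, with an smp_wmb()/smp_rmb() pair ensuring a newly published push target is never missed while the worker is deciding to go idle. A condensed sketch of the two sides of that handshake, using only names from the hunks above; this is not a standalone implementation:

	/* Producer side, as in xfs_ail_push(): publish target, start worker. */
	static void
	sketch_push(
		struct xfs_ail	*ailp,
		xfs_lsn_t	threshold_lsn)
	{
		smp_wmb();	/* pairs with the smp_rmb() in the worker */
		ailp->xa_target = threshold_lsn;
		if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
	}

	/* Consumer side, as in xfs_ail_worker() when the AIL looks idle. */
	static int
	sketch_worker_may_idle(
		struct xfs_ail	*ailp,
		xfs_lsn_t	target)
	{
		smp_rmb();	/* pairs with the smp_wmb() above */
		if (ailp->xa_target == target) {
			clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
			return 1;	/* no new target: safe to stop */
		}
		return 0;		/* target moved: requeue the push */
	}
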
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c47918c..03b3b7f 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -305,7 +305,7 @@ xfs_trans_read_buf(
if (xfs_error_target == target) {
if (((xfs_req_num++) % xfs_error_mod) == 0) {
xfs_buf_relse(bp);
- cmn_err(CE_DEBUG, "Returning error!\n");
+ xfs_debug(mp, "Returning error!");
return XFS_ERROR(EIO);
}
}
@@ -383,7 +383,8 @@ xfs_trans_read_buf(
bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
*bpp = NULL;
- return 0;
+ return (flags & XBF_TRYLOCK) ?
+ 0 : XFS_ERROR(ENOMEM);
}
if (XFS_BUF_GETERROR(bp) != 0) {
XFS_BUF_SUPER_STALE(bp);
@@ -403,7 +404,7 @@ xfs_trans_read_buf(
xfs_force_shutdown(tp->t_mountp,
SHUTDOWN_META_IO_ERROR);
xfs_buf_relse(bp);
- cmn_err(CE_DEBUG, "Returning trans error!\n");
+ xfs_debug(mp, "Returning trans error!");
return XFS_ERROR(EIO);
}
}
@@ -427,7 +428,7 @@ shutdown_abort:
*/
#if defined(DEBUG)
if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
- cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
+ xfs_notice(mp, "about to pop assert, bp == 0x%p", bp);
#endif
ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
(XBF_STALE|XBF_DELWRI));
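
The xfs_trans_read_buf() change above reserves "NULL buffer, zero return" for XBF_TRYLOCK callers; blocking callers now get a hard ENOMEM instead of silently receiving *bpp == NULL. A hedged caller-side sketch of what that guarantees (the function and its arguments are illustrative):

	/* Hypothetical trylock caller, sketching the semantics above. */
	STATIC int
	example_try_read(
		struct xfs_mount	*mp,
		struct xfs_trans	*tp,
		struct xfs_buftarg	*target,
		xfs_daddr_t		blkno,
		int			len)
	{
		struct xfs_buf	*bp;
		int		error;

		error = xfs_trans_read_buf(mp, tp, target, blkno, len,
					   XBF_TRYLOCK, &bp);
		if (!error && !bp)
			return 0;	/* busy: only trylock readers see this */
		if (error)
			return error;	/* blocking readers always get an error */
		/* ... use bp ... */
		xfs_buf_relse(bp);
		return 0;
	}
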
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index ccb3453..048b0c6 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -44,28 +44,6 @@ xfs_trans_inode_broot_debug(
#endif
/*
- * Get an inode and join it to the transaction.
- */
-int
-xfs_trans_iget(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_ino_t ino,
- uint flags,
- uint lock_flags,
- xfs_inode_t **ipp)
-{
- int error;
-
- error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
- if (!error && tp) {
- xfs_trans_ijoin(tp, *ipp);
- (*ipp)->i_itemp->ili_lock_flags = lock_flags;
- }
- return error;
-}
-
-/*
* Add a locked inode to the transaction.
*
* The inode must be locked, and it cannot be associated with any transaction.
@@ -103,7 +81,7 @@ xfs_trans_ijoin(
*
*
* Grabs a reference to the inode which will be dropped when the transaction
- * is commited. The inode will also be unlocked at that point. The inode
+ * is committed. The inode will also be unlocked at that point. The inode
* must be locked, and it cannot be associated with any transaction.
*/
void
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c2..6b164e9e 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
struct xfs_ail {
struct xfs_mount *xa_mount;
struct list_head xa_ail;
- uint xa_gen;
- struct task_struct *xa_task;
xfs_lsn_t xa_target;
struct xfs_ail_cursor xa_cursors;
spinlock_t xa_lock;
+ struct delayed_work xa_work;
+ xfs_lsn_t xa_last_pushed_lsn;
+ unsigned long xa_flags;
};
+#define XFS_AIL_PUSHING_BIT 0
+
/*
* From xfs_trans_ail.c
*/
+
+extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
+
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_log_item **log_items, int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
xfs_trans_ail_delete_bulk(ailp, &lip, 1);
}
-void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
+
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
-xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp);
-
struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
-long xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int xfsaild_start(struct xfs_ail *);
-void xfsaild_stop(struct xfs_ail *);
-
#if BITS_PER_LONG != 64
static inline void
xfs_trans_ail_copy_lsn(
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index d8e6f8c..b7a5fe7 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -953,7 +953,7 @@ xfs_release(
* If we previously truncated this file and removed old data
* in the process, we want to initiate "early" writeout on
* the last close. This is an attempt to combat the notorious
- * NULL files problem which is particularly noticable from a
+ * NULL files problem which is particularly noticeable from a
* truncate down, buffered (re-)write (delalloc), followed by
* a crash. What we are effectively doing here is
* significantly reducing the time window where we'd otherwise
@@ -982,7 +982,7 @@ xfs_release(
*
* Further, check if the inode is being opened, written and
* closed frequently and we have delayed allocation blocks
- * oustanding (e.g. streaming writes from the NFS server),
+ * outstanding (e.g. streaming writes from the NFS server),
* truncating the blocks past EOF will cause fragmentation to
* occur.
*
@@ -1189,9 +1189,8 @@ xfs_inactive(
* inode might be lost for a long time or forever.
*/
if (!XFS_FORCED_SHUTDOWN(mp)) {
- cmn_err(CE_NOTE,
- "xfs_inactive: xfs_ifree() returned an error = %d on %s",
- error, mp->m_fsname);
+ xfs_notice(mp, "%s: xfs_ifree returned error %d",
+ __func__, error);
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
@@ -1208,12 +1207,12 @@ xfs_inactive(
*/
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
- xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
- "xfs_bmap_finish() returned error %d", error);
+ xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+ __func__, error);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
- xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
- "xfs_trans_commit() returned error %d", error);
+ xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+ __func__, error);
}
/*
@@ -1310,7 +1309,7 @@ xfs_create(
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
- goto std_return;
+ return error;
if (is_dir) {
rdev = 0;
@@ -1390,12 +1389,6 @@ xfs_create(
}
/*
- * At this point, we've gotten a newly allocated inode.
- * It is locked (and joined to the transaction).
- */
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- /*
* Now we join the directory inode to the transaction. We do not do it
* earlier because xfs_dir_ialloc might commit the previous transaction
* (and release all the locks). An error from here on will result in
@@ -1440,22 +1433,13 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to return the
- * vnode to the caller, we bump the vnode ref count now.
- */
- IHOLD(ip);
-
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
- goto out_abort_rele;
+ goto out_bmap_cancel;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error) {
- IRELE(ip);
- goto out_dqrele;
- }
+ if (error)
+ goto out_release_inode;
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -1469,27 +1453,21 @@ xfs_create(
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
- out_dqrele:
+ out_release_inode:
+ /*
+ * Wait until after the current transaction is aborted to
+ * release the inode. This prevents recursive transactions
+ * and deadlocks from xfs_inactive.
+ */
+ if (ip)
+ IRELE(ip);
+
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- std_return:
return error;
-
- out_abort_rele:
- /*
- * Wait until after the current transaction is aborted to
- * release the inode. This prevents recursive transactions
- * and deadlocks from xfs_inactive.
- */
- xfs_bmap_cancel(&free_list);
- cancel_flags |= XFS_TRANS_ABORT;
- xfs_trans_cancel(tp, cancel_flags);
- IRELE(ip);
- unlock_dp_on_error = B_FALSE;
- goto out_dqrele;
}
#ifdef DEBUG
@@ -2114,9 +2092,8 @@ xfs_symlink(
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
&free_list);
- if (error) {
- goto error1;
- }
+ if (error)
+ goto error2;
if (resblks)
resblks -= fs_blocks;
@@ -2148,7 +2125,7 @@ xfs_symlink(
error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
&first_block, &free_list, resblks);
if (error)
- goto error1;
+ goto error2;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -2161,13 +2138,6 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to return the
- * vnode to the caller, we bump the vnode ref count now.
- */
- IHOLD(ip);
-
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error2;
@@ -2861,7 +2831,8 @@ xfs_change_file_space(
ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
+ if (attr_flags & XFS_ATTR_SYNC)
+ xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index f6702927..3bcd233 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */
+#define XFS_ATTR_SYNC 0x10 /* synchronous operation required */
int xfs_readlink(struct xfs_inode *ip, char *link);
int xfs_release(struct xfs_inode *ip);
OpenPOWER on IntegriCloud