author | Anton Altaparmakov <aia21@cantab.net> | 2006-03-23 17:06:08 +0000
---|---|---
committer | Anton Altaparmakov <aia21@cantab.net> | 2006-03-23 17:06:08 +0000
commit | 92fe7b9ea8ef101bff3c75ade89b93b5f62a7955 |
tree | 3dba4faa78f1bbe4be503275173e3a63b5d60f22 /fs |
parent | e750d1c7cc314b9ba1934b0b474b7d39f906f865 |
parent | b0e6e962992b76580f4900b166a337bad7c1e81b |
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'fs')
64 files changed, 615 insertions, 661 deletions
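Nearly every hunk below applies the same sem2mutex conversion: a `struct semaphore` that was only ever used for mutual exclusion becomes a `struct mutex` from `<linux/mutex.h>`. As a minimal sketch of the mapping (the names `example_lock`, `example_count`, and `example_get` are illustrative, not symbols from this diff):

```c
#include <linux/mutex.h>

/* Static definition: DECLARE_MUTEX(lock) becomes DEFINE_MUTEX(lock). */
static DEFINE_MUTEX(example_lock);
static int example_count;

static int example_get(void)
{
	/* down_interruptible(&sem) becomes mutex_lock_interruptible(&mutex),
	 * which likewise returns nonzero if interrupted by a signal. */
	if (mutex_lock_interruptible(&example_lock))
		return -EINTR;
	example_count++;
	mutex_unlock(&example_lock);	/* up(&sem) becomes mutex_unlock(&mutex) */
	return 0;
}

/* Dynamic init: init_MUTEX(&sem) and sema_init(&sem, 1) become mutex_init(&mutex). */
static void example_setup(struct mutex *m)
{
	mutex_init(m);
	mutex_lock(m);			/* down(&sem) becomes mutex_lock(&mutex) */
	mutex_unlock(m);
}
```

The autofs4 hunks below also show the error-path discipline this keeps intact: when `mutex_lock_interruptible()` fails, the caller frees any prior allocation before returning `-EINTR`, exactly as `autofs4_wait()` does with `kfree(name)`.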
diff --git a/fs/9p/mux.c b/fs/9p/mux.c index ea1134e..8e8356c 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c @@ -31,6 +31,7 @@ #include <linux/poll.h> #include <linux/kthread.h> #include <linux/idr.h> +#include <linux/mutex.h> #include "debug.h" #include "v9fs.h" @@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16); -static DECLARE_MUTEX(v9fs_mux_task_lock); +static DEFINE_MUTEX(v9fs_mux_task_lock); static struct workqueue_struct *v9fs_mux_wq; static int v9fs_mux_num; @@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num, v9fs_mux_poll_task_num); - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1); if (n > v9fs_mux_poll_task_num) { @@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) } v9fs_mux_num++; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); return 0; } @@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) int i; struct v9fs_mux_poll_task *vpt; - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); vpt = m->poll_task; list_del(&m->mux_list); for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { @@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) v9fs_mux_poll_task_num--; } v9fs_mux_num--; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); } /** diff --git a/fs/adfs/file.c b/fs/adfs/file.c index afebbfd..6af1088 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -19,11 +19,7 @@ * * adfs regular file handling primitives */ -#include <linux/errno.h> #include <linux/fs.h> -#include <linux/fcntl.h> -#include <linux/time.h> -#include <linux/stat.h> #include <linux/buffer_head.h> /* for file_fsync() */ #include <linux/adfs_fs.h> diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 385bed0..f54c5b2 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -13,6 +13,7 @@ /* Internal header file for autofs */ #include <linux/auto_fs4.h> +#include <linux/mutex.h> #include <linux/list.h> /* This is the range of ioctl() numbers we claim as ours */ @@ -102,7 +103,7 @@ struct autofs_sb_info { int reghost_enabled; int needs_reghost; struct super_block *sb; - struct semaphore wq_sem; + struct mutex wq_mutex; spinlock_t fs_lock; struct autofs_wait_queue *queues; /* Wait queue pointer */ }; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 2d30828..1ad98d4 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; - init_MUTEX(&sbi->wq_sem); + mutex_init(&sbi->wq_mutex); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; s->s_blocksize = 1024; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 394ff36..be78e93 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, return -ENOENT; } - if (down_interruptible(&sbi->wq_sem)) { + if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } @@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* Can't wait for an expire if there's no mount */ if (notify == NFY_NONE && !d_mountpoint(dentry)) { kfree(name); - up(&sbi->wq_sem); + 
mutex_unlock(&sbi->wq_mutex); return -ENOENT; } @@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); if ( !wq ) { kfree(name); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } @@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq->status = -EINTR; /* Status return if interrupted */ atomic_set(&wq->wait_ctr, 2); atomic_set(&wq->notified, 1); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); } else { atomic_inc(&wq->wait_ctr); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(name); DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); @@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok { struct autofs_wait_queue *wq, **wql; - down(&sbi->wq_sem); + mutex_lock(&sbi->wq_mutex); for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { if ( wq->wait_queue_token == wait_queue_token ) break; } if ( !wq ) { - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(wq->name); wq->name = NULL; /* Do not wait on this queue */ @@ -1243,11 +1243,11 @@ static int __init init_bio(void) scale = 4; /* - * scale number of entries + * Limit number of entries reserved -- mempools are only used when + * the system is completely unable to allocate memory, so we only + * need enough to make progress. */ - bvec_pool_entries = megabytes * 2; - if (bvec_pool_entries > 256) - bvec_pool_entries = 256; + bvec_pool_entries = 1 + scale; fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); if (!fs_bio_set) diff --git a/fs/block_dev.c b/fs/block_dev.c index 6e50346..44d05e6 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) SLAB_CTOR_CONSTRUCTOR) { memset(bdev, 0, sizeof(*bdev)); - sema_init(&bdev->bd_sem, 1); - sema_init(&bdev->bd_mount_sem, 1); + mutex_init(&bdev->bd_mutex); + mutex_init(&bdev->bd_mount_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); inode_init_once(&ei->vfs_inode); @@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file) if (ret) goto out_first; bdev->bd_contains = whole; - down(&whole->bd_sem); + mutex_lock(&whole->bd_mutex); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = whole->bd_inode->i_data.backing_dev_info; if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { whole->bd_part_count--; - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); ret = -ENXIO; goto out_first; } kobject_get(&p->kobj); bdev->bd_part = p; bd_set_size(bdev, (loff_t) p->nr_sects << 9); - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); } } else { put_disk(disk); @@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count++; - up(&bdev->bd_contains->bd_sem); + 
mutex_unlock(&bdev->bd_contains->bd_mutex); } } bdev->bd_openers++; - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); return 0; @@ -652,7 +652,7 @@ out_first: put_disk(disk); module_put(owner); out: - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); if (ret) bdput(bdev); @@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev) struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count--; - up(&bdev->bd_contains->bd_sem); + mutex_unlock(&bdev->bd_contains->bd_mutex); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; @@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev) bdev->bd_contains = NULL; } unlock_kernel(); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index 1d3683d..0d6ca7b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev) * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * - * This takes the block device bd_mount_sem to make sure no new mounts + * This takes the block device bd_mount_mutex to make sure no new mounts * happen on bdev until thaw_bdev() is called. * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. @@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) { struct super_block *sb; - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); sb = get_super(bdev); if (sb && !(sb->s_flags & MS_RDONLY)) { sb->s_frozen = SB_FREEZE_WRITE; @@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) drop_super(sb); } - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); } EXPORT_SYMBOL(thaw_bdev); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index fed55e3..632561d 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) rc = -ENOMEM; else if (pTcon->ses->capabilities & CAP_UNIX) { diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c index a7a47bb..ec4dfe9 100644 --- a/fs/cifs/fcntl.c +++ b/fs/cifs/fcntl.c @@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg) cifs_sb = CIFS_SB(file->f_dentry->d_sb); pTcon = cifs_sb->tcon; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); 
+ mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { rc = -ENOMEM; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 675bd25..165d674 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file) } } - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5935991..ff93a9f 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) /* Unlink can be called from rename so we can not grab the sem here since we deadlock otherwise */ -/* down(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/ if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) rc = 0; } - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 0f99aae..8d0da7c 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, /* No need to check for cross device links since server will do that BB note DFS case in future though (when we may have to check) */ - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); fromName = build_path_from_dentry(old_file); toName = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if((fromName == NULL) || (toName == NULL)) { rc = -ENOMEM; goto cifs_hl_exit; @@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) xid = GetXid(); - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + 
mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (!full_path) goto out_no_free; @@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); @@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) /* BB would it be safe against deadlock to grab this sem even though rename itself grabs the sem and calls lookup? */ -/* down(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/ if(full_path == NULL) { FreeXid(xid); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 288cc04..edb3b6e 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file) if(pTcon == NULL) return -EINVAL; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { return -ENOMEM; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 777e336..3938444 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index bfb8a23..14c5620 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -18,6 +18,7 @@ #include <linux/mount.h> #include <linux/tty.h> #include <linux/devpts_fs.h> +#include <linux/parser.h> #define DEVPTS_SUPER_MAGIC 0x1cd1 @@ -32,39 +33,60 @@ static struct { umode_t mode; } config = {.mode = 0600}; +enum { + Opt_uid, 
Opt_gid, Opt_mode, + Opt_err +}; + +static match_table_t tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_mode, "mode=%o"}, + {Opt_err, NULL} +}; + static int devpts_remount(struct super_block *sb, int *flags, char *data) { - int setuid = 0; - int setgid = 0; - uid_t uid = 0; - gid_t gid = 0; - umode_t mode = 0600; - char *this_char; - - this_char = NULL; - while ((this_char = strsep(&data, ",")) != NULL) { - int n; - char dummy; - if (!*this_char) + char *p; + + config.setuid = 0; + config.setgid = 0; + config.uid = 0; + config.gid = 0; + config.mode = 0600; + + while ((p = strsep(&data, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + int option; + + if (!*p) continue; - if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) { - setuid = 1; - uid = n; - } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) { - setgid = 1; - gid = n; - } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1) - mode = n & ~S_IFMT; - else { - printk("devpts: called with bogus options\n"); + + token = match_token(p, tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.uid = option; + config.setuid = 1; + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.gid = option; + config.setgid = 1; + break; + case Opt_mode: + if (match_octal(&args[0], &option)) + return -EINVAL; + config.mode = option & ~S_IFMT; + break; + default: + printk(KERN_ERR "devpts: called with bogus options\n"); return -EINVAL; } } - config.setuid = setuid; - config.setgid = setgid; - config.uid = uid; - config.gid = gid; - config.mode = mode; return 0; } @@ -103,12 +103,12 @@ * (these locking rules also apply for S_NOQUOTA flag in the inode - note that * for altering the flag i_mutex is also needed). If operation is holding * reference to dquot in other way (e.g. quotactl ops) it must be guarded by - * dqonoff_sem. + * dqonoff_mutex. * This locking assures that: * a) update/access to dquot pointers in inode is serialized * b) everyone is guarded against invalidate_dquots() * - * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced + * Each dquot has its dq_lock mutex. Locked dquots might not be referenced * from inodes (dquot_alloc_space() and such don't check the dq_lock). * Currently dquot is locked only when it is being read to memory (or space for * it is being allocated) on the first dqget() and when it is being released on @@ -118,9 +118,9 @@ * spinlock to internal buffers before writing. 
* * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > - * > dquot->dq_lock > dqio_sem - * i_mutex on quota files is special (it's below dqio_sem) + * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > + * dqio_mutex + * i_mutex on quota files is special (it's below dqio_mutex) */ static DEFINE_SPINLOCK(dq_list_lock); @@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot) static void wait_on_dquot(struct dquot *dquot) { - down(&dquot->dq_lock); - up(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); } #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) @@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); - down(&dqopt->dqio_sem); + mutex_lock(&dquot->dq_lock); + mutex_lock(&dqopt->dqio_mutex); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); if (ret < 0) @@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot) } set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: - up(&dqopt->dqio_sem); - up(&dquot->dq_lock); + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&dquot->dq_lock); return ret; } @@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); spin_lock(&dq_list_lock); if (!clear_dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); @@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot) ret = ret2; } out_sem: - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ if (atomic_read(&dquot->dq_count) > 1) goto out_dqlock; - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ @@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); out_dqlock: - up(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); return ret; } /* Invalidate all dquots on the list. Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. Also because we hold dqonoff_sem there can be no quota users - * for this sb+type at all. */ + * quota users. There can still be some users of quotas due to inodes being + * just deleted or pruned by prune_icache() (those are not attached to any + * list). We have to wait for such users. 
+ */ static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot, *tmp; +restart: spin_lock(&dq_list_lock); list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { if (dquot->dq_sb != sb) continue; if (dquot->dq_type != type) continue; -#ifdef __DQUOT_PARANOIA - if (atomic_read(&dquot->dq_count)) - BUG(); -#endif - /* Quota now has no users and it has been written on last dqput() */ + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { + DEFINE_WAIT(wait); + + atomic_inc(&dquot->dq_count); + prepare_to_wait(&dquot->dq_wait_unused, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&dq_list_lock); + /* Once dqput() wakes us up, we know it's time to free + * the dquot. + * IMPORTANT: we rely on the fact that there is always + * at most one process waiting for dquot to free. + * Otherwise dq_count would be > 1 and we would never + * wake up. + */ + if (atomic_read(&dquot->dq_count) > 1) + schedule(); + finish_wait(&dquot->dq_wait_unused, &wait); + dqput(dquot); + /* At this moment dquot() need not exist (it could be + * reclaimed by prune_dqcache(). Hence we must + * restart. */ + goto restart; + } + /* + * Quota now has no users and it has been written on last + * dqput() + */ remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); @@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); int cnt; - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; @@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dqstats.syncs++; spin_unlock(&dq_list_lock); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; } @@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) /* * Put reference to dquot * NOTE: If you change this function please check whether dqput_blocks() works right... - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static void dqput(struct dquot *dquot) { @@ -540,6 +566,10 @@ we_slept: if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); + /* Releasing dquot during quotaoff phase? 
*/ + if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && + atomic_read(&dquot->dq_count) == 1) + wake_up(&dquot->dq_wait_unused); spin_unlock(&dq_list_lock); return; } @@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) return NODQUOT; memset((caddr_t)dquot, 0, sizeof(struct dquot)); - sema_init(&dquot->dq_lock, 1); + mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); + init_waitqueue_head(&dquot->dq_wait_unused); dquot->dq_sb = sb; dquot->dq_type = type; atomic_set(&dquot->dq_count, 1); @@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) /* * Get reference to dquot - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { @@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type) return 0; } -/* This routine is guarded by dqonoff_sem semaphore */ +/* This routine is guarded by dqonoff_mutex mutex */ static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; @@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type) { LIST_HEAD(tofree_head); - /* We need to be guarded against prune_icache to reach all the - * inodes - otherwise some can be on the local list of prune_icache */ - down(&iprune_sem); down_write(&sb_dqopt(sb)->dqptr_sem); remove_dquot_ref(sb, type, &tofree_head); up_write(&sb_dqopt(sb)->dqptr_sem); - up(&iprune_sem); put_dquot_list(&tofree_head); } @@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type) unsigned int id = 0; int cnt, ret = 0; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return 0; down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_add: inode_add_bytes(inode, number); @@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; for (cnt = 0; cnt < MAXQUOTAS; cnt++) @@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_sub: inode_sub_bytes(inode, 
number); @@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; /* Clear the arrays */ @@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type) int ret; struct quota_info *dqopt = sb_dqopt(sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); ret = dqopt->ops[type]->write_file_info(sb, type); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type) struct inode *toputinode[MAXQUOTAS]; /* We need to serialize quota_off() for device */ - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { toputinode[cnt] = NULL; if (type != -1 && cnt != type) @@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type) dqopt->info[cnt].dqi_bgrace = 0; dqopt->ops[cnt] = NULL; } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). 
*/ if (sb->s_op->sync_fs) @@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type) * changes done by userspace on the next quotaon() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (toputinode[cnt]) { - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_enabled(sb, cnt)) { @@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type) mark_inode_dirty(toputinode[cnt]); iput(toputinode[cnt]); } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); } if (sb->s_bdev) invalidate_bdev(sb->s_bdev, 0); @@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) /* And now flush the block cache so that kernel sees the changes */ invalidate_bdev(sb->s_bdev, 0); mutex_lock(&inode->i_mutex); - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); if (sb_has_quota_enabled(sb, type)) { error = -EBUSY; goto out_lock; @@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) dqopt->ops[type] = fmt->qf_ops; dqopt->info[type].dqi_format = fmt; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); goto out_file_init; } - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); mutex_unlock(&inode->i_mutex); set_enable_flags(dqopt, type); add_dquot_ref(sb, type); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; @@ -1462,7 +1489,7 @@ out_file_init: dqopt->files[type] = NULL; iput(inode); out_lock: - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); if (oldflags != -1) { down_write(&dqopt->dqptr_sem); /* Set the flags back (in the case of accidental quotaon() @@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_get_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_set_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) ii->dqi_flags = mi->dqi_flags & DQF_MASK; ii->dqi_valid = IIF_ALL; spin_unlock(&dq_data_lock); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1656,9 +1683,9 @@ int 
vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mark_info_dirty(sb, type); /* Force write to disk */ sb->dq_op->write_info(sb, type); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4284cd3..1c2b16f 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -34,6 +34,7 @@ #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> @@ -46,7 +47,7 @@ * LOCKING: * There are three level of locking required by epoll : * - * 1) epsem (semaphore) + * 1) epmutex (mutex) * 2) ep->sem (rw_semaphore) * 3) ep->lock (rw_lock) * @@ -67,9 +68,9 @@ * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). * It is possible to drop the "ep->sem" and to use the global - * semaphore "epsem" (together with "ep->lock") to have it working, + * semaphore "epmutex" (together with "ep->lock") to have it working, * but having "ep->sem" will make the interface more scalable. - * Events that require holding "epsem" are very rare, while for + * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->sem" will guarantee * a greater scalability. */ @@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type, /* * This semaphore is used to serialize ep_free() and eventpoll_release_file(). */ -static struct semaphore epsem; +static struct mutex epmutex; /* Safe wake up implementation */ static struct poll_safewake psw; @@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) } -/* Used to initialize the epoll bits inside the "struct file" */ -void eventpoll_init_file(struct file *file) -{ - - INIT_LIST_HEAD(&file->f_ep_links); - spin_lock_init(&file->f_ep_lock); -} - - /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are @@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file) * cleanup path, and this means that noone is using this file anymore. * The only hit might come from ep_free() but by holding the semaphore * will correctly serialize the operation. We do need to acquire - * "ep->sem" after "epsem" because ep_remove() requires it when called + * "ep->sem" after "epmutex" because ep_remove() requires it when called * from anywhere but ep_free(). */ - down(&epsem); + mutex_lock(&epmutex); while (!list_empty(lsthead)) { epi = list_entry(lsthead->next, struct epitem, fllink); @@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file) up_write(&ep->sem); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep) * We do not need to hold "ep->sem" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but - * holding "epsem" is sufficent here. 
+ * holding "epmutex" is sufficent here. */ - down(&epsem); + mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. @@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep) ep_remove(ep, epi); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void) { int error; - init_MUTEX(&epsem); + mutex_init(&epmutex); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_poll_safewake_init(&psw); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ad1432a..4ca8249 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -36,22 +36,6 @@ #include "acl.h" #include "xip.h" -/* - * Couple of helper functions - make the code slightly cleaner. - */ - -static inline void ext2_inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void ext2_dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); @@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - ext2_dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -201,7 +185,7 @@ out: return err; out_fail: - ext2_dec_count(inode); + inode_dec_link_count(inode); iput (inode); goto out; } @@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - ext2_inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return ext2_add_nondir(dentry, inode); @@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) if (dir->i_nlink >= EXT2_LINK_MAX) goto out; - ext2_inc_count(dir); + inode_inc_link_count(dir); inode = ext2_new_inode (dir, S_IFDIR | mode); err = PTR_ERR(inode); @@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) else inode->i_mapping->a_ops = &ext2_aops; - ext2_inc_count(inode); + inode_inc_link_count(inode); err = ext2_make_empty(inode, dir); if (err) @@ -258,11 +242,11 @@ out: return err; out_fail: - ext2_dec_count(inode); - ext2_dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - ext2_dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - ext2_dec_count(inode); + inode_dec_link_count(inode); err = 0; out: return err; @@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) err = ext2_unlink(dir, dentry); if (!err) { inode->i_size = 0; - ext2_dec_count(inode); - ext2_dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } return err; @@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - ext2_dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); err = 
ext2_add_link(new_dentry, old_inode); if (err) { - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - ext2_inc_count(new_dir); + inode_inc_link_count(new_dir); } /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * ext2_dec_count() will mark the inode dirty. + * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; ext2_delete_entry (old_de, old_page); - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { ext2_set_link(old_inode, dir_de, dir_page, new_dir); - ext2_dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 832867a..7734591 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - unsigned long offset, blk; - int i, num, stored; - struct buffer_head * bh, * tmp, * bha[16]; - struct ext3_dir_entry_2 * de; - struct super_block * sb; + unsigned long offset; + int i, stored; + struct ext3_dir_entry_2 *de; + struct super_block *sb; int err; struct inode *inode = filp->f_dentry->d_inode; int ret = 0; @@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp, } #endif stored = 0; - bh = NULL; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { - blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); - bh = ext3_bread(NULL, inode, blk, 0, &err); + unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); + struct buffer_head map_bh; + struct buffer_head *bh = NULL; + + map_bh.b_state = 0; + err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); + if (!err) { + page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, + &filp->f_ra, + filp, + map_bh.b_blocknr >> + (PAGE_CACHE_SHIFT - inode->i_blkbits), + 1); + bh = ext3_bread(NULL, inode, blk, 0, &err); + } + + /* + * We ignore I/O errors on directories so users have a chance + * of recovering data when there's a bad sector + */ if (!bh) { ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", @@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp, continue; } - /* - * Do the readahead - */ - if (!offset) { - for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0; - i > 0; i--) { - tmp = ext3_getblk (NULL, inode, ++blk, 0, &err); - if (tmp && !buffer_uptodate(tmp) && - !buffer_locked(tmp)) - bha[num++] = tmp; - else - brelse (tmp); - } - if (num) { - ll_rw_block (READA, num, bha); - for (i = 0; i < num; i++) - brelse (bha[i]); - } - } - revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid diff --git a/fs/ext3/file.c b/fs/ext3/file.c index 98e7834..59098ea 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp) if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { - down(&EXT3_I(inode)->truncate_sem); + mutex_lock(&EXT3_I(inode)->truncate_mutex); ext3_discard_reservation(inode); - up(&EXT3_I(inode)->truncate_sem); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 0384e53..2c36137 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -671,7 +671,7 @@ err_out: * The BKL may not be held on entry here. Be sure to take it early. 
*/ -static int +int ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int extend_disksize) { @@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, if (!create || err == -EIO) goto cleanup; - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); /* * If the indirect block is missing while we are reading @@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, } partial = ext3_get_branch(inode, depth, offsets, chain, &err); if (!partial) { - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; clear_buffer_new(bh_result); @@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, err = ext3_splice_branch(handle, inode, iblock, chain, partial, left); /* - * i_disksize growing is protected by truncate_sem. Don't forget to + * i_disksize growing is protected by truncate_mutex. Don't forget to * protect it if you're about to implement concurrent * ext3_get_block() -bzzz */ if (!err && extend_disksize && inode->i_size > ei->i_disksize) ei->i_disksize = inode->i_size; - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; @@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... * * Same applies to ext3_get_block(). We will deadlock on various things like - * lock_journal and i_truncate_sem. + * lock_journal and i_truncate_mutex. * * Setting PF_MEMALLOC here doesn't work - too many internal memory * allocations fail. @@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode) * From here we block out all ext3_get_block() callers who want to * modify the block allocation tree. 
*/ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (n == 1) { /* direct blocks */ ext3_free_data(handle, inode, NULL, i_data+offsets[0], @@ -2228,7 +2228,7 @@ do_indirects: ext3_discard_reservation(inode); - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 556cd55..aaf1da1 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -182,7 +182,7 @@ flags_err: * need to allocate reservation structure for this inode * before set the window size */ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); @@ -190,7 +190,7 @@ flags_err: struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); return 0; } case EXT3_IOC_GROUP_EXTEND: { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 56bf765..efe5b20 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif - init_MUTEX(&ei->truncate_sem); + mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } } @@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_sem) - * down(dqio_sem) journal_start() + * DQUOT_INIT() down(dqio_mutex) + * down(dqio_mutex) journal_start() * */ diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index a1a9e04..ab171ea8 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = { static inline void lock_fat(struct msdos_sb_info *sbi) { - down(&sbi->fat_lock); + mutex_lock(&sbi->fat_lock); } static inline void unlock_fat(struct msdos_sb_info *sbi) { - up(&sbi->fat_lock); + mutex_unlock(&sbi->fat_lock); } void fat_ent_access_init(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); - init_MUTEX(&sbi->fat_lock); + mutex_init(&sbi->fat_lock); switch (sbi->fat_bits) { case 32: @@ -73,8 +73,8 @@ repeat: * orig_start..fdt->next_fd */ start = orig_start; - if (start < fdt->next_fd) - start = fdt->next_fd; + if (start < files->next_fd) + start = files->next_fd; newfd = start; if (start < fdt->max_fdset) { @@ -102,9 +102,8 @@ repeat: * we reacquire the fdtable pointer and use it while holding * the lock, no one can free it during that time. 
*/ - fdt = files_fdtable(files); - if (start <= fdt->next_fd) - fdt->next_fd = newfd + 1; + if (start <= files->next_fd) + files->next_fd = newfd + 1; error = newfd; @@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) kmem_cache_free(files_cachep, fdt->free_files); return; } - if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { + if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && + fdt->max_fds <= NR_OPEN_DEFAULT) { /* * The fdtable was embedded */ @@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu) void free_fdtable(struct fdtable *fdt) { - if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || - fdt->max_fds > NR_OPEN_DEFAULT) + if (fdt->free_files || + fdt->max_fdset > EMBEDDED_FD_SET_SIZE || + fdt->max_fds > NR_OPEN_DEFAULT) call_rcu(&fdt->rcu, free_fdtable_rcu); } @@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) (nfdt->max_fds - fdt->max_fds) * sizeof(struct file *)); } - nfdt->next_fd = fdt->next_fd; } /* @@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num) void free_fdset(fd_set *array, int num) { - int size = num / 8; - - if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */ + if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ return; - else if (size <= PAGE_SIZE) + else if (num <= 8 * PAGE_SIZE) kfree(array); else vfree(array); @@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr) fd_set *new_openset = NULL, *new_execset = NULL; struct file **new_fds; - fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); + fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); if (!fdt) goto out; - memset(fdt, 0, sizeof(*fdt)); - nfds = __FD_SETSIZE; + nfds = 8 * L1_CACHE_BYTES; /* Expand to the max in easy steps */ - do { - if (nfds < (PAGE_SIZE * 8)) - nfds = PAGE_SIZE * 8; - else { - nfds = nfds * 2; - if (nfds > NR_OPEN) - nfds = NR_OPEN; - } - } while (nfds <= nr); + while (nfds <= nr) { + nfds = nfds * 2; + if (nfds > NR_OPEN) + nfds = NR_OPEN; + } new_openset = alloc_fdset(nfds); new_execset = alloc_fdset(nfds); diff --git a/fs/file_table.c b/fs/file_table.c index 44fabea..bcea199 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, */ struct file *get_empty_filp(void) { + struct task_struct *tsk; static int old_max; struct file * f; @@ -112,13 +113,14 @@ struct file *get_empty_filp(void) if (security_file_alloc(f)) goto fail_sec; - eventpoll_init_file(f); + tsk = current; + INIT_LIST_HEAD(&f->f_u.fu_list); atomic_set(&f->f_count, 1); - f->f_uid = current->fsuid; - f->f_gid = current->fsgid; rwlock_init(&f->f_owner.lock); + f->f_uid = tsk->fsuid; + f->f_gid = tsk->fsgid; + eventpoll_init_file(f); /* f->f_version: 0 */ - INIT_LIST_HEAD(&f->f_u.fu_list); return f; over: diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 6628c3b..4c6473a 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -9,6 +9,7 @@ //#define DBG //#define DEBUG_LOCKS +#include <linux/mutex.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/hpfs_fs.h> @@ -57,8 +58,8 @@ struct hpfs_inode_info { unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; - struct semaphore i_sem; - struct semaphore i_parent; + struct mutex i_mutex; + struct mutex i_parent_mutex; loff_t **i_rddir_off; struct inode vfs_inode; }; diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index e3d17e9..56f2c33 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ 
-186,9 +186,9 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } - down(&hpfs_inode->i_parent); + mutex_lock(&hpfs_inode->i_parent_mutex); if (!i->i_nlink) { - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } - down(&hpfs_inode->i_sem); + mutex_lock(&hpfs_inode->i_mutex); hpfs_write_inode_nolock(i); - up(&hpfs_inode->i_sem); + mutex_unlock(&hpfs_inode->i_mutex); iput(parent); } else { mark_inode_dirty(i); } - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); } void hpfs_write_inode_nolock(struct inode *i) diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 8ff8fc4..a03abb1 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail3; @@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail3: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); brelse(bh); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - 
up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); hpfs_adjust_length((char *)name, &len); again: - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -410,8 +410,8 @@ again: if (rep++) break; - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); d_drop(dentry); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) > 1 || @@ -442,8 +442,8 @@ again: out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) hpfs_adjust_length((char *)name, &len); lock_kernel(); - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, lock_kernel(); /* order doesn't matter, due to VFS exclusion */ - down(&hpfs_i(i)->i_parent); + mutex_lock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - down(&hpfs_i(new_inode)->i_parent); - down(&hpfs_i(old_dir)->i_sem); + mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); + mutex_lock(&hpfs_i(old_dir)->i_mutex); if (new_dir != old_dir) - down(&hpfs_i(new_dir)->i_sem); + mutex_lock(&hpfs_i(new_dir)->i_mutex); /* Erm? 
Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_decide_conv(i, (char *)new_name, new_len); end1: if (old_dir != new_dir) - up(&hpfs_i(new_dir)->i_sem); - up(&hpfs_i(old_dir)->i_sem); - up(&hpfs_i(i)->i_parent); + mutex_unlock(&hpfs_i(new_dir)->i_mutex); + mutex_unlock(&hpfs_i(old_dir)->i_mutex); + mutex_unlock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - up(&hpfs_i(new_inode)->i_parent); + mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); unlock_kernel(); return err; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 63e88d7..9488a79 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->i_sem); - init_MUTEX(&ei->i_parent); + mutex_init(&ei->i_mutex); + mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); } } @@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable; DEFINE_SPINLOCK(inode_lock); /* - * iprune_sem provides exclusion between the kswapd or try_to_free_pages + * iprune_mutex provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. */ -DECLARE_MUTEX(iprune_sem); +DEFINE_MUTEX(iprune_mutex); /* * Statistics gathering.. @@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) i_size_ordered_init(inode); #ifdef CONFIG_INOTIFY INIT_LIST_HEAD(&inode->inotify_watches); - sema_init(&inode->inotify_sem, 1); + mutex_init(&inode->inotify_mutex); #endif } @@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not - * change during umount anymore, and because iprune_sem keeps + * change during umount anymore, and because iprune_mutex keeps * shrink_icache_memory() away. */ cond_resched_lock(&inode_lock); @@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb) int busy; LIST_HEAD(throw_away); - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); return busy; } @@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev) if (sb) { /* * no need to lock the super, get_super holds the - * read semaphore so the filesystem cannot go away + * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * hold). 
*/ @@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); if (current_is_kswapd()) mod_page_state(kswapd_inodesteal, reap); diff --git a/fs/inotify.c b/fs/inotify.c index 3041503..0ee39ef 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -54,10 +54,10 @@ int inotify_max_queued_events; * Lock ordering: * * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_sem (synchronize shrink_icache_memory()) + * iprune_mutex (synchronize shrink_icache_memory()) * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) - * inotify_dev->sem (protects inotify_device and watches->d_list) + * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) + * inotify_dev->mutex (protects inotify_device and watches->d_list) */ /* @@ -79,12 +79,12 @@ int inotify_max_queued_events; /* * struct inotify_device - represents an inotify instance * - * This structure is protected by the semaphore 'sem'. + * This structure is protected by the mutex 'mutex'. */ struct inotify_device { wait_queue_head_t wq; /* wait queue for i/o */ struct idr idr; /* idr mapping wd -> watch */ - struct semaphore sem; /* protects this bad boy */ + struct mutex mutex; /* protects this bad boy */ struct list_head events; /* list of queued events */ struct list_head watches; /* list of watches */ atomic_t count; /* reference count */ @@ -101,7 +101,7 @@ struct inotify_device { * device. In read(), this list is walked and all events that can fit in the * buffer are returned. * - * Protected by dev->sem of the device in which we are queued. + * Protected by dev->mutex of the device in which we are queued. */ struct inotify_kernel_event { struct inotify_event event; /* the user-space event */ @@ -112,8 +112,8 @@ struct inotify_kernel_event { /* * struct inotify_watch - represents a watch request on a specific inode * - * d_list is protected by dev->sem of the associated watch->dev. - * i_list and mask are protected by inode->inotify_sem of the associated inode. + * d_list is protected by dev->mutex of the associated watch->dev. + * i_list and mask are protected by inode->inotify_mutex of the associated inode. * dev, inode, and wd are never written to once the watch is created. */ struct inotify_watch { @@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, /* * inotify_dev_get_event - return the next event in the given dev's queue * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static inline struct inotify_kernel_event * inotify_dev_get_event(struct inotify_device *dev) @@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) /* * inotify_dev_queue_event - add a new event to the given device * - * Caller must hold dev->sem. Can sleep (calls kernel_event()). + * Caller must hold dev->mutex. Can sleep (calls kernel_event()). 
*/ static void inotify_dev_queue_event(struct inotify_device *dev, struct inotify_watch *watch, u32 mask, @@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, /* * remove_kevent - cleans up and ultimately frees the given kevent * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void remove_kevent(struct inotify_device *dev, struct inotify_kernel_event *kevent) @@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, /* * inotify_dev_event_dequeue - destroy an event on the given device * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void inotify_dev_event_dequeue(struct inotify_device *dev) { @@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) /* * inotify_dev_get_wd - returns the next WD for use by the given dev * - * Callers must hold dev->sem. This function can sleep. + * Callers must hold dev->mutex. This function can sleep. */ static int inotify_dev_get_wd(struct inotify_device *dev, struct inotify_watch *watch) @@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, /* * create_watch - creates a watch on the given device. * - * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. + * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. */ static struct inotify_watch *create_watch(struct inotify_device *dev, @@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, /* * inotify_find_dev - find the watch associated with the given inode and dev * - * Callers must hold inode->inotify_sem. + * Callers must hold inode->inotify_mutex. */ static struct inotify_watch *inode_find_dev(struct inode *inode, struct inotify_device *dev) @@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, * the IN_IGNORED event to the given device signifying that the inode is no * longer watched. * - * Callers must hold both inode->inotify_sem and dev->sem. We drop a + * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a * reference to the inode before returning. * * The inode is not iput() so as to remain atomic. If the inode needs to be @@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, if (!inotify_inode_watched(inode)) return; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { u32 watch_mask = watch->mask; if (watch_mask & mask) { struct inotify_device *dev = watch->dev; get_inotify_watch(watch); - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, mask, cookie, name); if (watch_mask & IN_ONESHOT) remove_watch_no_event(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); put_inotify_watch(watch); } } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_queue_event); @@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie); * @list: list of inodes being unmounted (sb->s_inodes) * * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. + * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. * We temporarily drop inode_lock, however, and CAN block. 
*/ void inotify_unmount_inodes(struct list_head *list) @@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list) * We can safely drop inode_lock here because we hold * references on both inode and next_i. Also no new inodes * will be added since the umount has begun. Finally, - * iprune_sem keeps shrink_icache_memory() away. + * iprune_mutex keeps shrink_icache_memory() away. */ spin_unlock(&inode_lock); @@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) iput(need_iput_tmp); /* for each watch, send IN_UNMOUNT and then remove it */ - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); iput(inode); spin_lock(&inode_lock); @@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) { struct inotify_watch *watch, *next; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_is_dead); @@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) int ret = 0; poll_wait(file, &dev->wq, wait); - down(&dev->sem); + mutex_lock(&dev->mutex); if (!list_empty(&dev->events)) ret = POLLIN | POLLRDNORM; - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - down(&dev->sem); + mutex_lock(&dev->mutex); events = !list_empty(&dev->events); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (events) { ret = 0; break; @@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, if (ret) return ret; - down(&dev->sem); + mutex_lock(&dev->mutex); while (1) { struct inotify_kernel_event *kevent; @@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, remove_kevent(dev, kevent); } - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) * Destroy all of the watches on this device. Unfortunately, not very * pretty. We cannot do a simple iteration over the list, because we * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_sem before dev->sem. The following works. + * hold inode->inotify_mutex before dev->mutex. The following works. 
*/ while (1) { struct inotify_watch *watch; struct list_head *watches; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watches = &dev->watches; if (list_empty(watches)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); break; } watch = list_entry(watches->next, struct inotify_watch, d_list); get_inotify_watch(watch); - up(&dev->sem); + mutex_unlock(&dev->mutex); inode = watch->inode; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); remove_watch_no_event(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); } /* destroy all of the events on this device */ - down(&dev->sem); + mutex_lock(&dev->mutex); while (!list_empty(&dev->events)) inotify_dev_event_dequeue(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); /* free this device: the put matching the get in inotify_init() */ put_inotify_dev(dev); @@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) struct inotify_watch *watch; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watch = idr_find(&dev->idr, wd); if (unlikely(!watch)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); return -EINVAL; } get_inotify_watch(watch); inode = watch->inode; - up(&dev->sem); + mutex_unlock(&dev->mutex); - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); /* make sure that we did not race */ watch = idr_find(&dev->idr, wd); if (likely(watch)) remove_watch(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); return 0; @@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) INIT_LIST_HEAD(&dev->events); INIT_LIST_HEAD(&dev->watches); init_waitqueue_head(&dev->wq); - sema_init(&dev->sem, 1); + mutex_init(&dev->mutex); dev->event_count = 0; dev->queue_size = 0; dev->max_events = inotify_max_queued_events; @@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) inode = nd.dentry->d_inode; dev = filp->private_data; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); if (mask & IN_MASK_ADD) mask_add = 1; @@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) list_add(&watch->i_list, &inode->inotify_watches); ret = watch->wd; out: - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); path_release(&nd); fput_and_out: fput_light(filp, fput_needed); diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 543ed54..3f5102b 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); - down(&journal->j_checkpoint_sem); + mutex_lock(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we @@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal) log_do_checkpoint(journal); spin_lock(&journal->j_state_lock); } - up(&journal->j_checkpoint_sem); + mutex_unlock(&journal->j_checkpoint_mutex); } } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e4b516a..95a628d 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -659,8 +659,8 @@ static journal_t * journal_init_common (void) 
init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); - init_MUTEX(&journal->j_barrier); - init_MUTEX(&journal->j_checkpoint_sem); + mutex_init(&journal->j_barrier); + mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_state_lock); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ca91797..5fc4088 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal) * to make sure that we serialise special journal-locked operations * too. */ - down(&journal->j_barrier); + mutex_lock(&journal->j_barrier); } /** @@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); - up(&journal->j_barrier); + mutex_unlock(&journal->j_barrier); spin_lock(&journal->j_state_lock); --journal->j_barrier_count; spin_unlock(&journal->j_state_lock); diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index fc3855a..890d7ff 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c @@ -42,7 +42,7 @@ #include <linux/quotaops.h> #include <linux/highmem.h> #include <linux/vfs.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/uaccess.h> @@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) fmc = c->fmc; D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); f = jffs_find_file(c, inode->i_ino); @@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) printk("jffs_setattr(): Invalid inode number: %lu\n", inode->i_ino); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -EINVAL; goto out; }); @@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) if (!(new_node = jffs_alloc_node())) { D(printk("jffs_setattr(): Allocation failed!\n")); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -ENOMEM; goto out; } @@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) D(printk("jffs_notify_change(): The write failed!\n")); jffs_free_node(new_node); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); goto out; } @@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) mark_inode_dirty(inode); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); out: unlock_kernel(); return res; @@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, goto jffs_rename_end; } D3(printk (KERN_NOTICE "rename(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create a node and initialize as much as needed. 
*/ result = -ENOMEM; if (!(node = jffs_alloc_node())) { @@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, jffs_rename_end: D3(printk (KERN_NOTICE "rename(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_rename() */ @@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) int ddino; lock_kernel(); D3(printk (KERN_NOTICE "readdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); if (filp->f_pos == 0) { D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filldir(dirent, f->name, f->nsize, filp->f_pos , f->ino, DT_UNKNOWN) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) } while(f && f->deleted); } D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return filp->f_pos; } /* jffs_readdir() */ @@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) }); D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); r = -ENAMETOOLONG; if (len > JFFS_MAX_NAME_LEN) { @@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if ((len == 1) && (name[0] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->ino))) { D(printk("jffs_lookup(): . iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->pino))) { D(printk("jffs_lookup(): .. iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((f = jffs_find_child(d, name, len))) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, f->ino))) { D(printk("jffs_lookup(): iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else { D3(printk("jffs_lookup(): Couldn't find the file. 
" "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", @@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) d_add(dentry, inode); D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return NULL; jffs_lookup_end: D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); jffs_lookup_end_no_biglock: unlock_kernel(); @@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) ClearPageError(page); D3(printk (KERN_NOTICE "readpage(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); read_len = 0; result = 0; @@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) kunmap(page); D3(printk (KERN_NOTICE "readpage(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (result) { SetPageError(page); @@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) c = dir_f->c; D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask); @@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result = 0; jffs_mkdir_end: D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mkdir() */ @@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry) D3(printk("***jffs_rmdir()\n")); D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); lock_kernel(); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, S_IFDIR); D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); D3(printk("***jffs_unlink()\n")); D3(printk (KERN_NOTICE "unlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, 0); D3(printk (KERN_NOTICE "unlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) c = dir_f->c; D3(printk (KERN_NOTICE "mknod(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create and initialize a new node. 
*/ if (!(node = jffs_alloc_node())) { @@ -1152,7 +1152,7 @@ jffs_mknod_err: jffs_mknod_end: D3(printk (KERN_NOTICE "mknod(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mknod() */ @@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) return -ENOMEM; } D3(printk (KERN_NOTICE "symlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) d_instantiate(dentry, inode); jffs_symlink_end: D3(printk (KERN_NOTICE "symlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_symlink() */ @@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, return -ENOMEM; } D3(printk (KERN_NOTICE "create(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, d_instantiate(dentry, inode); jffs_create_end: D3(printk (KERN_NOTICE "create(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_create() */ @@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); D3(printk (KERN_NOTICE "file_write(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Urgh. POSIX says we can do short writes if we feel like it. * In practice, we can't. Nothing will cope. So we loop until @@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, } out: D3(printk (KERN_NOTICE "file_write(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); /* Fix things in the real inode. 
*/ if (pos > inode->i_size) { @@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return -EIO; } D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); switch (cmd) { case JFFS_PRINT_HASH: @@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, ret = -ENOTTY; } D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return ret; } /* jffs_ioctl() */ @@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode) } c = (struct jffs_control *)inode->i_sb->s_fs_info; D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); if (!(f = jffs_find_file(c, inode->i_ino))) { D(printk("jffs_read_inode(): No such inode (%lu).\n", inode->i_ino)); D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return; } inode->u.generic_ip = (void *)f; @@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode) } D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); } diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index ce7b54b..0ef207d 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c @@ -62,7 +62,7 @@ #include <linux/fs.h> #include <linux/stat.h> #include <linux/pagemap.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <linux/smp_lock.h> #include <linux/time.h> @@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr) D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); D1(printk("***jffs_garbage_collect_thread(): round #%u, " "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); @@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr) gc_end: D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); } /* for (;;) */ } /* jffs_garbage_collect_thread() */ diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c index 6da13b30..7d8ca1a 100644 --- a/fs/jffs/jffs_fm.c +++ b/fs/jffs/jffs_fm.c @@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit) fmc->tail = NULL; fmc->head_extra = NULL; fmc->tail_extra = NULL; - init_MUTEX(&fmc->biglock); + mutex_init(&fmc->biglock); return fmc; } diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h index f64151e..c794d92 100644 --- a/fs/jffs/jffs_fm.h +++ b/fs/jffs/jffs_fm.h @@ -20,10 +20,11 @@ #ifndef __LINUX_JFFS_FM_H__ #define __LINUX_JFFS_FM_H__ +#include <linux/config.h> #include <linux/types.h> #include <linux/jffs.h> #include <linux/mtd/mtd.h> -#include <linux/config.h> +#include <linux/mutex.h> /* The alignment between two nodes in the flash memory. 
*/ #define JFFS_ALIGN_SIZE 4 @@ -97,7 +98,7 @@ struct jffs_fmcontrol struct jffs_fm *tail; struct jffs_fm *head_extra; struct jffs_fm *tail_extra; - struct semaphore biglock; + struct mutex biglock; }; /* Notice the two members head_extra and tail_extra in the jffs_control @@ -7,6 +7,8 @@ #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/vfs.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, @@ -530,7 +532,7 @@ struct simple_attr { char set_buf[24]; void *data; const char *fmt; /* format for read operation */ - struct semaphore sem; /* protects access to these buffers */ + struct mutex mutex; /* protects access to these buffers */ }; /* simple_attr_open is called by an actual attribute open file operation @@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file, attr->set = set; attr->data = inode->u.generic_ip; attr->fmt = fmt; - init_MUTEX(&attr->sem); + mutex_init(&attr->mutex); file->private_data = attr; @@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, if (!attr->get) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); if (*ppos) /* continued read */ size = strlen(attr->get_buf); else /* first read */ @@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, (unsigned long long)attr->get(attr->data)); ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } @@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, if (!attr->set) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) @@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index b25bca5..5b6a454 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -6,18 +6,6 @@ #include "minix.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = minix_add_link(dentry, inode); @@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -125,7 +113,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); } @@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) goto out; - inc_count(dir); + inode_inc_link_count(dir); inode = minix_new_inode(dir, &err); if (!inode) @@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) inode->i_mode |= S_ISGID; minix_set_inode(inode, 0); - inc_count(inode); + 
inode_inc_link_count(inode); err = minix_make_empty(inode, dir); if (err) @@ -178,11 +166,11 @@ out: return err; out_fail: - dec_count(inode); - dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry) goto end_unlink; inode->i_ctime = dir->i_ctime; - dec_count(inode); + inode_dec_link_count(inode); end_unlink: return err; } @@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) if (minix_empty_dir(inode)) { err = minix_unlink(dir, dentry); if (!err) { - dec_count(dir); - dec_count(inode); + inode_dec_link_count(dir); + inode_dec_link_count(inode); } } return err; @@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inc_count(old_inode); + inode_inc_link_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inc_count(old_inode); + inode_inc_link_count(old_inode); err = minix_add_link(new_dentry, old_inode); if (err) { - dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - inc_count(new_dir); + inode_inc_link_count(new_dir); } minix_delete_entry(old_de, old_page); - dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); - dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; @@ -104,7 +104,7 @@ */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) - * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives + * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ @@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) return NULL; } - down(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { @@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); - up(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); } } @@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname) * a) we can get into loop creation. Check is done in is_subdir(). * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on - * sb->s_vfs_rename_sem. We might be more accurate, but that's another + * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _three_ objects - parents and victim (if it exists). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change - * only under ->s_vfs_rename_sem _and_ that parent of the object we + * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. 
Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, - * lock child" and rename is under ->s_vfs_rename_sem. + * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 973b444..ebdad8f 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum); error = -EACCES; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (!atomic_read(&NCP_FINFO(inode)->opened)) { struct ncp_entry_info finfo; int result; @@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right) } out_unlock: - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); out: return error; } diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index d277a58..0b521d3 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->open_sem); + mutex_init(&ei->open_mutex); inode_init_once(&ei->vfs_inode); } } @@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) } /* server->lock = 0; */ - init_MUTEX(&server->sem); + mutex_init(&server->mutex); server->packet = NULL; /* server->buffer_size = 0; */ /* server->conn_status = 0; */ @@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->dentry_ttl = 0; /* no caching */ INIT_LIST_HEAD(&server->tx.requests); - init_MUTEX(&server->rcv.creq_sem); + mutex_init(&server->rcv.creq_mutex); server->tx.creq = NULL; server->rcv.creq = NULL; server->data_ready = sock->sk->sk_data_ready; diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index c755e18..d9ebf64 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode) int err; err = 0; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); @@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 6593a5c..8783eb7 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncp_abort_request(server, req, err); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static inline void __ncptcp_abort(struct ncp_server *server) @@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req static int ncp_add_request(struct ncp_server *server, struct 
ncp_request_reply *req) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); if (!ncp_conn_valid(server)) { - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); printk(KERN_ERR "ncpfs: tcp: Server died\n"); return -EIO; } if (server->tx.creq || server->rcv.creq) { req->status = RQ_QUEUED; list_add_tail(&req->req, &server->tx.requests); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } __ncp_start_request(server, req); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } @@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s) info_server(server, 0, server->unexpected_packet.data, result); continue; } - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); req = server->rcv.creq; if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && server->connection == get_conn_number(&reply)))) { @@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s) server->rcv.creq = NULL; ncp_finish_request(req, result); __ncp_next_request(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); continue; } } - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } drop:; _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); @@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) void ncpdgram_timeout_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncpdgram_timeout_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static inline void ncp_init_req(struct ncp_request_reply* req) @@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_rcv_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } void ncp_tcp_tx_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_try_send(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static int do_ncp_rpc_call(struct ncp_server *server, int size, @@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server) void ncp_lock_server(struct ncp_server *server) { - down(&server->sem); + mutex_lock(&server->mutex); if (server->lock) printk(KERN_WARNING "ncp_lock_server: was locked!\n"); server->lock = 1; @@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server) return; } server->lock = 0; - up(&server->sem); + mutex_unlock(&server->mutex); } @@ -973,7 +973,7 @@ repeat: fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fdset, - fdt->next_fd); + files->next_fd); /* * N.B. 
For clone tasks sharing a files structure, this test @@ -998,7 +998,7 @@ repeat: FD_SET(fd, fdt->open_fds); FD_CLR(fd, fdt->close_on_exec); - fdt->next_fd = fd + 1; + files->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { @@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __FD_CLR(fd, fdt->open_fds); - if (fd < fdt->next_fd) - fdt->next_fd = fd; + if (fd < files->next_fd) + files->next_fd = fd; } void fastcall put_unused_fd(unsigned int fd) diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 1d24fea..826c131 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) case BLK_HDR: info->state = BLK_LIST; (*pos)++; - break; + /*fallthrough*/ case BLK_LIST: if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { /* diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c index b471315..c33963f 100644 --- a/fs/qnx4/file.c +++ b/fs/qnx4/file.c @@ -12,10 +12,7 @@ * 27-06-1998 by Frank Denis : file overwriting. */ -#include <linux/config.h> -#include <linux/types.h> #include <linux/fs.h> -#include <linux/time.h> #include <linux/qnx4_fs.h> /* @@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type) /* Now when everything is written we can discard the pagecache so * that userspace sees the changes. We need i_mutex and so we could - * not do it inside dqonoff_sem. Moreover we need to be carefull + * not do it inside dqonoff_mutex. Moreover we need to be carefull * about races with quotaoff() (that is the reason why we have own * reference to inode). */ - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { discard[cnt] = NULL; if (type != -1 && cnt != type) @@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); } - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (discard[cnt]) { mutex_lock(&discard[cnt]->i_mutex); diff --git a/fs/quota_v2.c b/fs/quota_v2.c index b4199ec..c519a58 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot) ssize_t ret; struct v2_disk_dqblk ddquot, empty; - /* dq_off is guarded by dqio_sem */ + /* dq_off is guarded by dqio_mutex */ if (!dquot->dq_off) if ((ret = dq_insert_tree(dquot)) < 0) { printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 2115383..6ada209 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -24,18 +24,7 @@ * caches is sufficient. 
*/ -#include <linux/module.h> #include <linux/fs.h> -#include <linux/pagemap.h> -#include <linux/highmem.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/smp_lock.h> -#include <linux/backing-dev.h> -#include <linux/ramfs.h> - -#include <asm/uaccess.h> -#include "internal.h" struct address_space_operations ramfs_aops = { .readpage = simple_readpage, diff --git a/fs/seq_file.c b/fs/seq_file.c index 7c40570..555b9ac 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op) file->private_data = p; } memset(p, 0, sizeof(*p)); - sema_init(&p->sem, 1); + mutex_init(&p->lock); p->op = op; /* @@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) void *p; int err = 0; - down(&m->sem); + mutex_lock(&m->lock); /* * seq_file->op->..m_start/m_stop/m_next may do special actions * or optimisations based on the file->f_version, so we want to @@ -164,7 +164,7 @@ Done: else *ppos += copied; file->f_version = m->version; - up(&m->sem); + mutex_unlock(&m->lock); return copied; Enomem: err = -ENOMEM; @@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) struct seq_file *m = (struct seq_file *)file->private_data; long long retval = -EINVAL; - down(&m->sem); + mutex_lock(&m->lock); m->version = file->f_version; switch (origin) { case 1: @@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) } } } - up(&m->sem); + mutex_unlock(&m->lock); file->f_version = m->version; return retval; } @@ -76,9 +76,9 @@ static struct super_block *alloc_super(void) down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); - sema_init(&s->s_vfs_rename_sem,1); - sema_init(&s->s_dquot.dqio_sem, 1); - sema_init(&s->s_dquot.dqonoff_sem, 1); + mutex_init(&s->s_vfs_rename_mutex); + mutex_init(&s->s_dquot.dqio_mutex); + mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; @@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, * will protect the lockfs code from trying to start a snapshot * while we are mounting */ - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); if (IS_ERR(s)) goto out; diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 7f0e4b5..b8a73f7 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -16,18 +16,6 @@ #include <linux/smp_lock.h> #include "sysv.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = sysv_add_link(dentry, inode); @@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -124,7 +112,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); @@ -151,7 +139,7 @@ 
static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 	if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max)
 		goto out;
-	inc_count(dir);
+	inode_inc_link_count(dir);
 
 	inode = sysv_new_inode(dir, S_IFDIR|mode);
 	err = PTR_ERR(inode);
@@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 	sysv_set_inode(inode, 0);
 
-	inc_count(inode);
+	inode_inc_link_count(inode);
 
 	err = sysv_make_empty(inode, dir);
 	if (err)
@@ -175,11 +163,11 @@ out:
 	return err;
 
 out_fail:
-	dec_count(inode);
-	dec_count(inode);
+	inode_dec_link_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 out_dir:
-	dec_count(dir);
+	inode_dec_link_count(dir);
 	goto out;
 }
@@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry)
 		goto out;
 
 	inode->i_ctime = dir->i_ctime;
-	dec_count(inode);
+	inode_dec_link_count(inode);
 out:
 	return err;
 }
@@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
 		err = sysv_unlink(dir, dentry);
 		if (!err) {
 			inode->i_size = 0;
-			dec_count(inode);
-			dec_count(dir);
+			inode_dec_link_count(inode);
+			inode_dec_link_count(dir);
 		}
 	}
 	return err;
@@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
 		new_de = sysv_find_entry(new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		sysv_set_link(new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
 			new_inode->i_nlink--;
-		dec_count(new_inode);
+		inode_dec_link_count(new_inode);
 	} else {
 		if (dir_de) {
 			err = -EMLINK;
 			if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
 				goto out_dir;
 		}
-		inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		err = sysv_add_link(new_dentry, old_inode);
 		if (err) {
-			dec_count(old_inode);
+			inode_dec_link_count(old_inode);
 			goto out_dir;
 		}
 		if (dir_de)
-			inc_count(new_dir);
+			inode_inc_link_count(new_dir);
 	}
 
 	sysv_delete_entry(old_de, old_page);
-	dec_count(old_inode);
+	inode_dec_link_count(old_inode);
 
 	if (dir_de) {
 		sysv_set_link(dir_de, dir_page, new_dir);
-		dec_count(old_dir);
+		inode_dec_link_count(old_dir);
 	}
 	return 0;
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 201049a..ea521f8 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb,
 	int bitmap_nr;
 	unsigned long overflow;
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
 		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
 	{
@@ -211,7 +211,7 @@ error_return:
 	sb->s_dirt = 1;
 	if (UDF_SB_LVIDBH(sb))
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
 }
@@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb,
 	int nr_groups, bitmap_nr;
 	struct buffer_head *bh;
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
 		goto out;
@@ -275,7 +275,7 @@ out:
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	return alloc_count;
 }
@@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb,
 	int newblock = 0;
 
 	*err = -ENOSPC;
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 
 repeat:
 	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
@@ -364,7 +364,7 @@ repeat:
 	}
 	if (i >= (nr_groups*2)) {
-		up(&sbi->s_alloc_sem);
+		mutex_unlock(&sbi->s_alloc_mutex);
 		return newblock;
 	}
 	if (bit < sb->s_blocksize << 3)
@@ -373,7 +373,7 @@ repeat:
 	bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
 	if (bit >= sb->s_blocksize << 3) {
-		up(&sbi->s_alloc_sem);
+		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
 	}
@@ -387,7 +387,7 @@ got_block:
 	*/
 	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
-		up(&sbi->s_alloc_sem);
+		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
 		return 0;
 	}
@@ -410,13 +410,13 @@ got_block:
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	*err = 0;
 	return newblock;
 
 error_return:
 	*err = -EIO;
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	return 0;
 }
@@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb,
 	int8_t etype;
 	int i;
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
 		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
 	{
@@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb,
 error_return:
 	sb->s_dirt = 1;
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
 }
@@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
 	else
 		return 0;
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	extoffset = sizeof(struct unallocSpaceEntry);
 	bloc = UDF_I_LOCATION(table);
@@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 		sb->s_dirt = 1;
 	}
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	return alloc_count;
 }
@@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb,
 	else
 		return newblock;
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
 		goal = 0;
@@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb,
 	if (spread == 0xFFFFFFFF) {
 		udf_release_data(goal_bh);
-		up(&sbi->s_alloc_sem);
+		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
 	}
@@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb,
 	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
 		udf_release_data(goal_bh);
-		up(&sbi->s_alloc_sem);
+		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
 		return 0;
 	}
@@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb,
 	}
 	sb->s_dirt = 1;
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 	*err = 0;
 	return newblock;
 }
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index c9b707b..3873c67 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode)
 	clear_inode(inode);
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	if (sbi->s_lvidbh) {
 		if (S_ISDIR(inode->i_mode))
 			UDF_SB_LVIDIU(sb)->numDirs =
@@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode)
 		mark_buffer_dirty(sbi->s_lvidbh);
 	}
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 
 	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
 }
@@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
 		return NULL;
 	}
 
-	down(&sbi->s_alloc_sem);
+	mutex_lock(&sbi->s_alloc_mutex);
 	UDF_I_UNIQUE(inode) = 0;
 	UDF_I_LENEXTENTS(inode) = 0;
 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
@@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
 	UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
 	insert_inode_hash(inode);
 	mark_inode_dirty(inode);
-	up(&sbi->s_alloc_sem);
+	mutex_unlock(&sbi->s_alloc_mutex);
 
 	if (DQUOT_ALLOC_INODE(inode)) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 368d8f8..9303c50 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 	sb->s_fs_info = sbi;
 	memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
 
-	init_MUTEX(&sbi->s_alloc_sem);
+	mutex_init(&sbi->s_alloc_mutex);
 
 	if (!udf_parse_options((char *)options, &uopt))
 		goto error_out;
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index ed69d7fe..62ad481 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -23,18 +23,8 @@
  * ext2 fs regular file handling primitives
  */
 
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/ufs_fs.h>
-#include <linux/fcntl.h>
-#include <linux/time.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 
 /*
  * We have mostly NULL's here: the current defaults are ok for
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 2958cde..8d5f98a 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,18 +43,6 @@
 #define UFSD(x)
 #endif
 
-static inline void ufs_inc_count(struct inode *inode)
-{
-	inode->i_nlink++;
-	mark_inode_dirty(inode);
-}
-
-static inline void ufs_dec_count(struct inode *inode)
-{
-	inode->i_nlink--;
-	mark_inode_dirty(inode);
-}
-
 static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
 {
 	int err = ufs_add_link(dentry, inode);
@@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
 		d_instantiate(dentry, inode);
 		return 0;
 	}
-	ufs_dec_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 	return err;
 }
@@ -173,7 +161,7 @@ out:
 	return err;
 
 out_fail:
-	ufs_dec_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
 }
@@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
 	}
 
 	inode->i_ctime = CURRENT_TIME_SEC;
-	ufs_inc_count(inode);
+	inode_inc_link_count(inode);
 	atomic_inc(&inode->i_count);
 
 	error = ufs_add_nondir(dentry, inode);
@@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
 		goto out;
 
 	lock_kernel();
-	ufs_inc_count(dir);
+	inode_inc_link_count(dir);
 
 	inode = ufs_new_inode(dir, S_IFDIR|mode);
 	err = PTR_ERR(inode);
@@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
 	inode->i_op = &ufs_dir_inode_operations;
 	inode->i_fop = &ufs_dir_operations;
 
-	ufs_inc_count(inode);
+	inode_inc_link_count(inode);
 
 	err = ufs_make_empty(inode, dir);
 	if (err)
@@ -234,11 +222,11 @@ out:
 	return err;
 
 out_fail:
-	ufs_dec_count(inode);
-	ufs_dec_count(inode);
+	inode_dec_link_count(inode);
+	inode_dec_link_count(inode);
 	iput (inode);
 out_dir:
-	ufs_dec_count(dir);
+	inode_dec_link_count(dir);
 	unlock_kernel();
 	goto out;
 }
@@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry)
 		goto out;
 
 	inode->i_ctime = dir->i_ctime;
-	ufs_dec_count(inode);
+	inode_dec_link_count(inode);
 	err = 0;
 out:
 	unlock_kernel();
@@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
 		err = ufs_unlink(dir, dentry);
 		if (!err) {
 			inode->i_size = 0;
-			ufs_dec_count(inode);
-			ufs_dec_count(dir);
+			inode_dec_link_count(inode);
+			inode_dec_link_count(dir);
 		}
 	}
 	unlock_kernel();
@@ -319,35 +307,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
 		new_de = ufs_find_entry (new_dentry, &new_bh);
 		if (!new_de)
 			goto out_dir;
-		ufs_inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		ufs_set_link(new_dir, new_de, new_bh, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
 			new_inode->i_nlink--;
-		ufs_dec_count(new_inode);
+		inode_dec_link_count(new_inode);
 	} else {
 		if (dir_de) {
 			err = -EMLINK;
 			if (new_dir->i_nlink >= UFS_LINK_MAX)
 				goto out_dir;
 		}
-		ufs_inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		err = ufs_add_link(new_dentry, old_inode);
 		if (err) {
-			ufs_dec_count(old_inode);
+			inode_dec_link_count(old_inode);
 			goto out_dir;
 		}
 		if (dir_de)
-			ufs_inc_count(new_dir);
+			inode_inc_link_count(new_dir);
 	}
 
 	ufs_delete_entry (old_dir, old_de, old_bh);
-	ufs_dec_count(old_inode);
+	inode_dec_link_count(old_inode);
 
 	if (dir_de) {
 		ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
-		ufs_dec_count(old_dir);
+		inode_dec_link_count(old_dir);
 	}
 	unlock_kernel();
 	return 0;
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 8955720..713e6a7 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
 		while (j < xstats[i].endpoint) {
 			val = 0;
 			/* sum over all cpus */
-			for (c = 0; c < NR_CPUS; c++) {
-				if (!cpu_possible(c)) continue;
+			for_each_cpu(c)
 				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
-			}
 			len += sprintf(buffer + len, " %u", val);
 			j++;
 		}
 		buffer[len++] = '\n';
 	}
 	/* extra precision counters */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i)) continue;
+	for_each_cpu(i) {
 		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
 		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
 		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index a025649..7079cc8 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		for (c = 0; c < NR_CPUS; c++) {
-			if (!cpu_possible(c)) continue;
+		for_each_cpu(c) {
 			preempt_disable();
 			/* save vn_active, it's a universal truth! */
 			vn_active = per_cpu(xfsstats, c).vn_active;
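
The sysv and ufs hunks above are the same consolidation: each filesystem's private inc_count()/dec_count() (or ufs_inc_count()/ufs_dec_count()) pair is dropped in favour of the generic VFS helpers. Judging by the copies removed from fs/ufs/namei.c, the shared helpers perform the same two steps; a sketch of the pattern, not the verbatim <linux/fs.h> text:

	static inline void inode_inc_link_count(struct inode *inode)
	{
		inode->i_nlink++;		/* take the extra link ... */
		mark_inode_dirty(inode);	/* ... and queue the inode for write-back */
	}

	static inline void inode_dec_link_count(struct inode *inode)
	{
		inode->i_nlink--;		/* drop a link, same write-back rule */
		mark_inode_dirty(inode);
	}

One practical benefit of the consolidation is that link-count bookkeeping now has a single definition the VFS can later instrument or change, instead of near-identical static inlines scattered across filesystems.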
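The udf hunks convert sbi->s_alloc_sem from a semaphore used purely as a mutex into a real struct mutex: init_MUTEX() becomes mutex_init(), down() becomes mutex_lock(), and up() becomes mutex_unlock(). A minimal sketch of the conversion pattern, where example_sb_info, example_fill_super and example_alloc are illustrative names rather than anything from the patch:

	#include <linux/mutex.h>

	struct example_sb_info {
		struct mutex s_alloc_mutex;		/* was: struct semaphore s_alloc_sem */
	};

	static void example_fill_super(struct example_sb_info *sbi)
	{
		mutex_init(&sbi->s_alloc_mutex);	/* was: init_MUTEX(&sbi->s_alloc_sem) */
	}

	static void example_alloc(struct example_sb_info *sbi)
	{
		mutex_lock(&sbi->s_alloc_mutex);	/* was: down(&sbi->s_alloc_sem) */
		/* ... allocation bookkeeping done under the lock ... */
		mutex_unlock(&sbi->s_alloc_mutex);	/* was: up(&sbi->s_alloc_sem) */
	}

The conversion is mechanical only because the semaphore was always taken and released in this strict lock/unlock pairing by the same task; a mutex enforces those owner semantics and enables the kernel's mutex debugging checks, which a bare semaphore cannot.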
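The xfs hunks replace open-coded loops over NR_CPUS with the for_each_cpu() iterator, which in this kernel walks only the possible CPUs, making the explicit cpu_possible() test redundant. Condensed from the xfs_read_xfsstats() hunk above into a standalone helper (sum_counter is an illustrative name; the DECLARE_PER_CPU line mirrors what xfs's own header is assumed to provide):

	#include <linux/percpu.h>

	DECLARE_PER_CPU(struct xfsstats, xfsstats);	/* the per-cpu stats used above */

	/* Sum the j-th __u32 field of the per-cpu xfsstats over all CPUs. */
	static __u32 sum_counter(int j)
	{
		__u32 val = 0;
		int c;

		for_each_cpu(c)		/* was: for (c = 0; c < NR_CPUS; c++)
					 *          if (!cpu_possible(c)) continue; ... */
			val += *(((__u32 *)&per_cpu(xfsstats, c) + j));
		return val;
	}

Besides being shorter, the iterator form is robust on machines where possible CPUs are sparse, since it never touches per-cpu areas that were never allocated.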