path: root/fs/namei.c
author     Nick Piggin <npiggin@kernel.dk>    2011-01-07 17:49:53 +1100
committer  Nick Piggin <npiggin@kernel.dk>    2011-01-07 17:50:27 +1100
commit     c28cc36469554dc55540f059fbdc7fa22a2c31fc (patch)
tree       6b867456be48b8633a2d56a99e00bb3faf9dccc7 /fs/namei.c
parent     31e6b01f4183ff419a6d1f86177cbf4662347cec (diff)
fs: fs_struct use seqlock
Use a seqlock in the fs_struct to enable us to take an atomic copy of the complete cwd and root paths. Use this in the RCU lookup path to avoid a thread-shared spinlock in RCU lookup operations.

Multi-threaded apps may now perform path lookups with scalability matching multi-process apps. Operations such as stat(2) become very scalable for multi-threaded workloads.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
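For readers less familiar with seqcounts, the mechanism the patch relies on is the standard retry loop: a reader samples the sequence counter, copies the protected fields, and retries if the counter was odd or has changed, while the (still spinlock-serialized) writer increments the counter before and after its update. Below is a minimal, self-contained user-space sketch of that control flow; the names (demo_paths, demo_set_paths, demo_get_paths) are hypothetical and the data fields are accessed without the barriers a production seqlock needs -- in the kernel, read_seqcount_begin()/read_seqcount_retry() and write_seqcount_begin()/write_seqcount_end() take care of that.

/*
 * User-space sketch of the seqcount retry pattern used by this patch.
 * Hypothetical names; this is not the kernel's fs_struct.  The counter is
 * atomic, but the data fields are read without the barriers a production
 * seqlock requires; the kernel primitives supply those.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_paths {
	atomic_uint seq;	/* even: stable, odd: a writer is mid-update */
	const char *root;
	const char *pwd;
};

/* Writer: assumed to be serialized externally (the kernel keeps fs->lock for this). */
static void demo_set_paths(struct demo_paths *p, const char *root, const char *pwd)
{
	atomic_fetch_add(&p->seq, 1);	/* sequence goes odd: readers will retry */
	p->root = root;
	p->pwd = pwd;
	atomic_fetch_add(&p->seq, 1);	/* sequence even again: snapshot is stable */
}

/* Reader: lockless, atomically consistent snapshot of both paths. */
static void demo_get_paths(struct demo_paths *p, const char **root, const char **pwd)
{
	unsigned int seq;

	do {
		seq = atomic_load(&p->seq);
		*root = p->root;
		*pwd = p->pwd;
		/* retry if a write was in progress or completed meanwhile */
	} while ((seq & 1) || atomic_load(&p->seq) != seq);
}

int main(void)
{
	struct demo_paths p = { .seq = 0, .root = "/", .pwd = "/" };
	const char *root, *pwd;

	demo_set_paths(&p, "/", "/home/example");
	demo_get_paths(&p, &root, &pwd);
	printf("root=%s pwd=%s\n", root, pwd);
	return 0;
}

This is the same shape as the read loops added to set_root_rcu() and path_init_rcu() in the diff below.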
Diffstat (limited to 'fs/namei.c')
-rw-r--r--  fs/namei.c | 34
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index 8d3f15b..c731b50 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -684,9 +684,12 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
 {
 	if (!nd->root.mnt) {
 		struct fs_struct *fs = current->fs;
-		spin_lock(&fs->lock);
-		nd->root = fs->root;
-		spin_unlock(&fs->lock);
+		unsigned seq;
+
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+		} while (read_seqcount_retry(&fs->seq, seq));
 	}
 }
@@ -1369,26 +1372,31 @@ static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct n
 	if (*name=='/') {
 		struct fs_struct *fs = current->fs;
+		unsigned seq;
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
-		spin_lock(&fs->lock);
-		nd->root = fs->root;
-		nd->path = nd->root;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-		spin_unlock(&fs->lock);
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+			nd->path = nd->root;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
 	} else if (dfd == AT_FDCWD) {
 		struct fs_struct *fs = current->fs;
+		unsigned seq;
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
-		spin_lock(&fs->lock);
-		nd->path = fs->pwd;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-		spin_unlock(&fs->lock);
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->path = fs->pwd;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
+
 	} else {
 		struct dentry *dentry;
@@ -1411,7 +1419,7 @@ static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct n
 		if (fput_needed)
 			nd->file = file;
-		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+		nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
 		br_read_lock(vfsmount_lock);
 		rcu_read_lock();
 	}
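The writer side is not part of this fs/namei.c diff; in this series it lives with the fs_struct path-update helpers, which keep taking fs->lock and additionally bracket the update with write_seqcount_begin()/write_seqcount_end() so the lockless readers above can detect and retry across a concurrent change. A hedged sketch of that shape, with an illustrative function name rather than code quoted from the patch:

/*
 * Illustrative writer-side sketch (not taken verbatim from this series):
 * update one of the fs_struct paths while keeping lockless readers coherent.
 */
static void set_fs_root_sketch(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);			/* writers still serialize on the spinlock */
	write_seqcount_begin(&fs->seq);		/* sequence goes odd: readers will retry */
	old_root = fs->root;
	fs->root = *path;
	path_get(path);				/* fs_struct now holds a reference to the new root */
	write_seqcount_end(&fs->seq);		/* sequence even again: snapshot is stable */
	spin_unlock(&fs->lock);

	if (old_root.dentry)
		path_put(&old_root);		/* drop the old reference outside the write section */
}

Because writers remain serialized by fs->lock, the seqcount only has to protect readers from in-flight updates, which is what allows the RCU lookup path to drop the thread-shared spinlock entirely.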