Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/waitq.c       22
-rw-r--r--  fs/bio.c                  3
-rw-r--r--  fs/btrfs/extent-tree.c   51
-rw-r--r--  fs/btrfs/volumes.c        1
-rw-r--r--  fs/buffer.c               2
-rw-r--r--  fs/inode.c               32
-rw-r--r--  fs/jbd/commit.c           6
-rw-r--r--  fs/proc/loadavg.c        18
8 files changed, 96 insertions, 39 deletions
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index eeb2468..2341375 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -297,20 +297,14 @@ static int validate_request(struct autofs_wait_queue **wait,
*/
if (notify == NFY_MOUNT) {
/*
- * If the dentry isn't hashed just go ahead and try the
- * mount again with a new wait (not much else we can do).
- */
- if (!d_unhashed(dentry)) {
- /*
- * But if the dentry is hashed, that means that we
- * got here through the revalidate path. Thus, we
- * need to check if the dentry has been mounted
- * while we waited on the wq_mutex. If it has,
- * simply return success.
- */
- if (d_mountpoint(dentry))
- return 0;
- }
+ * If the dentry was successfully mounted while we slept
+ * on the wait queue mutex we can return success. If it
+ * isn't mounted (doesn't have submounts for the case of
+ * a multi-mount with no mount at its base) we can
+ * continue on and create a new request.
+ */
+ if (have_submounts(dentry))
+ return 0;
}
return 1;
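
Editor's note: the rewritten check above leans on have_submounts(), which answers "is anything at or below this dentry mounted?" — covering both the plain mounted-dentry case and a multi-mount whose base itself carries no mount. A minimal userspace sketch of that semantic, assuming a simplified dentry tree; the real kernel walk is iterative and takes dcache locks:

#include <stdio.h>
#include <stdbool.h>

struct dentry {
	bool mounted;                    /* stand-in for d_mountpoint() */
	struct dentry *child, *sibling;
};

/* true if this dentry, or anything below it, is a mountpoint */
static bool have_submounts(const struct dentry *d)
{
	if (d->mounted)
		return true;
	for (const struct dentry *c = d->child; c; c = c->sibling)
		if (have_submounts(c))
			return true;
	return false;
}

int main(void)
{
	struct dentry offset = { true,  NULL, NULL };     /* mounted offset */
	struct dentry base   = { false, &offset, NULL };  /* bare multi-mount root */
	printf("return success: %d\n", have_submounts(&base)); /* prints 1 */
	return 0;
}
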
diff --git a/fs/bio.c b/fs/bio.c
index 9871164..740699c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -26,10 +26,9 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
-#include <trace/block.h>
#include <scsi/sg.h> /* for struct sg_iovec */
-DEFINE_TRACE(block_split);
+#include <trace/events/block.h>
/*
* Test patch to inline a certain number of bi_io_vec's inside the bio
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3e2c7c7..35af933 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2622,7 +2622,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
search_start);
if (block_group && block_group_bits(block_group, data)) {
down_read(&space_info->groups_sem);
- goto have_block_group;
+ if (list_empty(&block_group->list) ||
+ block_group->ro) {
+ /*
+ * someone is removing this block group, so
+ * we can't jump into the have_block_group
+ * target because our list pointers are not
+ * valid
+ */
+ btrfs_put_block_group(block_group);
+ up_read(&space_info->groups_sem);
+ } else
+ goto have_block_group;
} else if (block_group) {
btrfs_put_block_group(block_group);
}
@@ -2656,6 +2667,13 @@ have_block_group:
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
+ if (last_ptr->block_group &&
+ (last_ptr->block_group->ro ||
+ !block_group_bits(last_ptr->block_group, data))) {
+ offset = 0;
+ goto refill_cluster;
+ }
+
offset = btrfs_alloc_from_cluster(block_group, last_ptr,
num_bytes, search_start);
if (offset) {
@@ -2681,10 +2699,17 @@ have_block_group:
last_ptr_loop = 1;
search_start = block_group->key.objectid;
+ /*
+ * we know this block group is properly
+ * in the list because
+ * btrfs_remove_block_group drops the
+ * cluster before it removes the block
+ * group from the list
+ */
goto have_block_group;
}
spin_unlock(&last_ptr->lock);
-
+refill_cluster:
/*
* this cluster didn't work out, free it and
* start over
@@ -5968,6 +5993,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
{
struct btrfs_path *path;
struct btrfs_block_group_cache *block_group;
+ struct btrfs_free_cluster *cluster;
struct btrfs_key key;
int ret;
@@ -5979,6 +6005,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
memcpy(&key, &block_group->key, sizeof(key));
+ /* make sure this block group isn't part of an allocation cluster */
+ cluster = &root->fs_info->data_alloc_cluster;
+ spin_lock(&cluster->refill_lock);
+ btrfs_return_cluster_to_free_space(block_group, cluster);
+ spin_unlock(&cluster->refill_lock);
+
+ /*
+ * make sure this block group isn't part of a metadata
+ * allocation cluster
+ */
+ cluster = &root->fs_info->meta_alloc_cluster;
+ spin_lock(&cluster->refill_lock);
+ btrfs_return_cluster_to_free_space(block_group, cluster);
+ spin_unlock(&cluster->refill_lock);
+
path = btrfs_alloc_path();
BUG_ON(!path);
@@ -5988,7 +6029,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&root->fs_info->block_group_cache_lock);
btrfs_remove_free_space_cache(block_group);
down_write(&block_group->space_info->groups_sem);
- list_del(&block_group->list);
+ /*
+ * we must use list_del_init so people can check to see if they
+ * are still on the list after taking the semaphore
+ */
+ list_del_init(&block_group->list);
up_write(&block_group->space_info->groups_sem);
spin_lock(&block_group->space_info->lock);
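
Editor's note: two details above work together. btrfs_remove_block_group() switches to list_del_init(), which leaves the removed node self-linked, so find_free_extent() can use list_empty(&block_group->list) under groups_sem as an "already unlinked?" test. A minimal userspace re-implementation of the kernel's circular list_head (simplified, no pointer poisoning) showing why plain list_del() would not support that check:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);          /* self-linked: list_empty() is true */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head groups, bg;
	INIT_LIST_HEAD(&groups);
	list_add(&bg, &groups);
	printf("on list:  empty=%d\n", list_empty(&bg));  /* 0 */
	list_del_init(&bg);
	printf("unlinked: empty=%d\n", list_empty(&bg));  /* 1 */
	return 0;
}

With the kernel's real list_del(), the pointers are poisoned instead of self-linked, so list_empty() on a removed node would not be a valid membership test.
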
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5f01dad..a6d35b0 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1440,6 +1440,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->io_align = root->sectorsize;
device->sector_size = root->sectorsize;
device->total_bytes = i_size_read(bdev->bd_inode);
+ device->disk_total_bytes = device->total_bytes;
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
diff --git a/fs/buffer.c b/fs/buffer.c
index aed2977..4910612 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2736,6 +2736,8 @@ has_buffers:
pos += blocksize;
}
+ map_bh.b_size = blocksize;
+ map_bh.b_state = 0;
err = get_block(inode, iblock, &map_bh, 0);
if (err)
goto unlock;
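
Editor's note: the two added assignments matter because get_block() implementations treat b_size and b_state as inputs — b_size says how many bytes to map, and the state bits are trusted as-is — so a stack-allocated map_bh must not reach them uninitialized. A small illustrative sketch of the bug class, with stand-in types rather than the kernel's buffer_head:

#include <stdio.h>

struct bh { unsigned long b_size; unsigned long b_state; };

/* stand-in for get_block(): sizes the mapping from b_size and
 * trusts the flag bits already present in b_state */
static void get_block_stub(const struct bh *map)
{
	printf("mapping %lu bytes, state %#lx\n", map->b_size, map->b_state);
}

int main(void)
{
	unsigned long blocksize = 1024;
	struct bh map_bh;           /* stack variable: contents undefined */

	/* the fix: set the input fields before every call */
	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	get_block_stub(&map_bh);
	return 0;
}
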
diff --git a/fs/inode.c b/fs/inode.c
index 0571983..bca0c61 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -219,6 +219,7 @@ static struct inode *alloc_inode(struct super_block *sb)
void destroy_inode(struct inode *inode)
{
BUG_ON(inode_has_buffers(inode));
+ ima_inode_free(inode);
security_inode_free(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
@@ -1053,13 +1054,22 @@ int insert_inode_locked(struct inode *inode)
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
- struct inode *old;
inode->i_state |= I_LOCK|I_NEW;
while (1) {
+ struct hlist_node *node;
+ struct inode *old = NULL;
spin_lock(&inode_lock);
- old = find_inode_fast(sb, head, ino);
- if (likely(!old)) {
+ hlist_for_each_entry(old, node, head, i_hash) {
+ if (old->i_ino != ino)
+ continue;
+ if (old->i_sb != sb)
+ continue;
+ if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+ continue;
+ break;
+ }
+ if (likely(!node)) {
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode_lock);
return 0;
@@ -1081,14 +1091,24 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
{
struct super_block *sb = inode->i_sb;
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
- struct inode *old;
inode->i_state |= I_LOCK|I_NEW;
while (1) {
+ struct hlist_node *node;
+ struct inode *old = NULL;
+
spin_lock(&inode_lock);
- old = find_inode(sb, head, test, data);
- if (likely(!old)) {
+ hlist_for_each_entry(old, node, head, i_hash) {
+ if (old->i_sb != sb)
+ continue;
+ if (!test(old, data))
+ continue;
+ if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+ continue;
+ break;
+ }
+ if (likely(!node)) {
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode_lock);
return 0;
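
Editor's note: instead of find_inode_fast()/find_inode() — which would return a dying inode and wait on it — the loops above walk the hash chain directly and skip anything flagged I_FREEING, I_CLEAR or I_WILL_FREE, falling through to insert a fresh inode. A compact userspace sketch of that skip-and-insert lookup, with simplified stand-ins for the kernel's types and flags:

#include <stdio.h>
#include <stddef.h>

#define I_FREEING 0x1
#define I_CLEAR   0x2

struct node { unsigned long ino; unsigned state; struct node *next; };

static struct node *lookup(struct node *head, unsigned long ino)
{
	for (struct node *n = head; n; n = n->next) {
		if (n->ino != ino)
			continue;
		if (n->state & (I_FREEING | I_CLEAR))
			continue;   /* being torn down: not a match */
		return n;
	}
	return NULL;                /* caller may now insert a fresh inode */
}

int main(void)
{
	struct node live  = { 42, 0, NULL };
	struct node dying = { 42, I_FREEING, &live };
	printf("found live copy: %d\n", lookup(&dying, 42) == &live);
	return 0;
}
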
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 06560c5..618e21c 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -241,7 +241,7 @@ write_out_data:
spin_lock(&journal->j_list_lock);
}
/* Someone already cleaned up the buffer? */
- if (!buffer_jbd(bh)
+ if (!buffer_jbd(bh) || bh2jh(bh) != jh
|| jh->b_transaction != commit_transaction
|| jh->b_jlist != BJ_SyncData) {
jbd_unlock_bh_state(bh);
@@ -478,7 +478,9 @@ void journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_list_lock);
continue;
}
- if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
+ if (buffer_jbd(bh) && bh2jh(bh) == jh &&
+ jh->b_transaction == commit_transaction &&
+ jh->b_jlist == BJ_Locked) {
__journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
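
Editor's note: both hunks apply the same rule — after jbd_lock_bh_state() is dropped and retaken, every cached fact must be re-derived, including that the journal_head saved earlier still belongs to this buffer; hence the added bh2jh(bh) == jh and b_transaction comparisons. A pattern sketch under stand-in types, not JBD's real API:

#include <stdio.h>

struct journal_head { int jlist; };
struct buffer_head { struct journal_head *private; };

#define BJ_Locked 1

/* revalidate after relocking: the cached jh must still map back to bh */
static int still_ours(const struct buffer_head *bh,
		      const struct journal_head *jh)
{
	return bh->private == jh && jh->jlist == BJ_Locked;
}

int main(void)
{
	struct journal_head cached = { BJ_Locked }; /* saved before sleeping */
	struct journal_head fresh  = { BJ_Locked }; /* attached while asleep */
	struct buffer_head bh = { &fresh };
	printf("stale journal_head caught: %d\n", !still_ours(&bh, &cached));
	return 0;
}
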
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index 9bca39c..1afa4dd 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -12,20 +12,14 @@
static int loadavg_proc_show(struct seq_file *m, void *v)
{
- int a, b, c;
- unsigned long seq;
+ unsigned long avnrun[3];
- do {
- seq = read_seqbegin(&xtime_lock);
- a = avenrun[0] + (FIXED_1/200);
- b = avenrun[1] + (FIXED_1/200);
- c = avenrun[2] + (FIXED_1/200);
- } while (read_seqretry(&xtime_lock, seq));
+ get_avenrun(avnrun, FIXED_1/200, 0);
- seq_printf(m, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
- LOAD_INT(a), LOAD_FRAC(a),
- LOAD_INT(b), LOAD_FRAC(b),
- LOAD_INT(c), LOAD_FRAC(c),
+ seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
+ LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+ LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+ LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
nr_running(), nr_threads,
task_active_pid_ns(current)->last_pid);
return 0;
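
Editor's note: the formatting relies on the kernel's fixed-point load encoding — avenrun values carry FSHIFT (11) fractional bits, the FIXED_1/200 offset rounds to the nearest hundredth, and LOAD_INT/LOAD_FRAC split out the integer part and the two-digit fraction. A standalone sketch of that arithmetic; the macro values match include/linux/sched.h, while the sample load is made up:

#include <stdio.h>

#define FSHIFT   11                 /* bits of fractional precision */
#define FIXED_1  (1UL << FSHIFT)    /* 1.0 in fixed point */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	/* a raw avenrun sample meaning a load of 0.73 (assumed value) */
	unsigned long raw = (unsigned long)(0.73 * FIXED_1);
	/* FIXED_1/200 = half of one hundredth: round to nearest */
	unsigned long v = raw + FIXED_1 / 200;
	printf("%lu.%02lu\n", LOAD_INT(v), LOAD_FRAC(v)); /* prints 0.73 */
	return 0;
}
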