author    Dongsheng Yang <yangds.fnst@cn.fujitsu.com>    2015-02-06 10:26:52 -0500
committer Chris Mason <clm@fb.com>                       2015-04-13 07:52:50 -0700
commit    e2d1f92399afb6ec518b68867ed10db2585b283a (patch)
tree      a6964e3b445268bf5c67c4b482de6c98b03571c8 /fs
parent    237c0e9f1fbfdca7287f3539f1fa73e5063156b5 (diff)
btrfs: qgroup: do a reservation in a higher level.
There are two problems in qgroup:

a) PAGE_CACHE_SIZE is 4K, so even when we write only 1K of data, qgroup reserves 4K. As a result, the last 3K of a qgroup's quota is not available to the user.

b) When the user writes inline data, qgroup does not reserve it at all, which is a window in which the qgroup limit can be exceeded.

The main idea of this patch is to reserve the size of write_bytes rather than reserve_bytes. In other words, qgroup no longer cares about the amount btrfs reserves internally for the user, only about the amount the user is actually going to write. We reserve that amount when the user writes and release it when the transaction is committed.

In this way, qgroup is decoupled from the complex reservation procedure in btrfs: it only reserves when the user writes and accounts the data once it is written, in commit_transaction().

Signed-off-by: Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
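As a rough sketch of the accounting model this commit moves to (a standalone, simplified user-space model, not kernel code — names such as qgroup_model, qgroup_reserve and qgroup_commit are hypothetical; the real implementation lives in fs/btrfs/qgroup.c): the qgroup is charged only write_bytes at write time, and the reservation is converted into accounted usage at commit.

/*
 * Minimal user-space model of the qgroup accounting change.
 * All names here are hypothetical; see fs/btrfs/qgroup.c for the real code.
 */
#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE 4096ULL	/* stands in for PAGE_CACHE_SIZE */

struct qgroup_model {
	uint64_t max_rfer;	/* configured referenced-bytes limit */
	uint64_t reserved;	/* reserved but not yet accounted */
	uint64_t rfer;		/* accounted at transaction commit */
};

/* Reserve @bytes against the limit; mirrors the EDQUOT check. */
static int qgroup_reserve(struct qgroup_model *qg, uint64_t bytes)
{
	if (qg->reserved + qg->rfer + bytes > qg->max_rfer)
		return -1;	/* would exceed the limit: EDQUOT */
	qg->reserved += bytes;
	return 0;
}

/* At commit time the written data is accounted and the reservation drops. */
static void qgroup_commit(struct qgroup_model *qg, uint64_t bytes)
{
	qg->reserved -= bytes;
	qg->rfer += bytes;
}

int main(void)
{
	struct qgroup_model qg = { .max_rfer = 8192 };
	uint64_t write_bytes = 1024;			/* what the user writes */
	uint64_t reserve_bytes = MODEL_PAGE_SIZE;	/* what btrfs rounds up to */

	/*
	 * Old behaviour: qgroup reserved reserve_bytes (a full page), so a
	 * 1K write consumed 4K of quota and the last 3K per page was lost.
	 * New behaviour: only write_bytes is charged against the qgroup.
	 */
	if (qgroup_reserve(&qg, write_bytes) == 0) {
		qgroup_commit(&qg, write_bytes);
		printf("charged %llu of %llu (old model would have charged %llu)\n",
		       (unsigned long long)qg.rfer,
		       (unsigned long long)qg.max_rfer,
		       (unsigned long long)reserve_bytes);
	}
	return 0;
}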
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/ctree.h        |   2
-rw-r--r--  fs/btrfs/extent-tree.c  |  38
-rw-r--r--  fs/btrfs/file.c         |   4
-rw-r--r--  fs/btrfs/inode.c        |  15
-rw-r--r--  fs/btrfs/qgroup.c       |  70
-rw-r--r--  fs/btrfs/qgroup.h       |   4
-rw-r--r--  fs/btrfs/relocation.c   |   2
7 files changed, 14 insertions(+), 121 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index d48b22f..851f235 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3448,7 +3448,7 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_ALL,
};
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4d37746..1eef4ee 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3331,7 +3331,7 @@ again:
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
- ret = btrfs_check_data_free_space(inode, num_pages);
+ ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
if (ret)
goto out_put;
@@ -3851,7 +3851,7 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
* This will check the space that the inode allocates from to make sure we have
* enough space for bytes.
*/
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3969,7 +3969,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
- ret = btrfs_qgroup_reserve(root, bytes);
+ ret = btrfs_qgroup_reserve(root, write_bytes);
if (ret)
goto out;
data_sinfo->bytes_may_use += bytes;
@@ -3995,7 +3995,6 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
WARN_ON(data_sinfo->bytes_may_use < bytes);
- btrfs_qgroup_free(root, bytes);
data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 0);
@@ -5243,8 +5242,6 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
u64 qgroup_reserved)
{
btrfs_block_rsv_release(root, rsv, (u64)-1);
- if (qgroup_reserved)
- btrfs_qgroup_free(root, qgroup_reserved);
}
/**
@@ -5478,11 +5475,8 @@ out_fail:
to_free = 0;
}
spin_unlock(&BTRFS_I(inode)->lock);
- if (dropped) {
- if (root->fs_info->quota_enabled)
- btrfs_qgroup_free(root, dropped * root->nodesize);
+ if (dropped)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
- }
if (to_free) {
btrfs_block_rsv_release(root, block_rsv, to_free);
@@ -5524,9 +5518,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
- if (root->fs_info->quota_enabled) {
- btrfs_qgroup_free(root, dropped * root->nodesize);
- }
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
@@ -5551,7 +5542,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
int ret;
- ret = btrfs_check_data_free_space(inode, num_bytes);
+ ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
if (ret)
return ret;
@@ -5727,12 +5718,8 @@ static int pin_down_extent(struct btrfs_root *root,
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
- if (reserved) {
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- num_bytes, -1);
+ if (reserved)
trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
- }
return 0;
}
@@ -6470,9 +6457,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
btrfs_put_block_group(cache);
trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
pin = 0;
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- buf->len, -1);
}
out:
if (pin)
@@ -7205,9 +7189,6 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- len, -1);
}
btrfs_put_block_group(cache);
@@ -7446,9 +7427,6 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
BUG_ON(ret); /* logic error */
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins->offset, 1);
btrfs_put_block_group(block_group);
return ret;
}
@@ -7595,10 +7573,6 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root_objectid,
- ins.offset, 1);
-
buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
BUG_ON(IS_ERR(buf)); /* -ENOMEM */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index faef1d6..23b6e03 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1510,7 +1510,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- ret = btrfs_check_data_free_space(inode, reserve_bytes);
+ ret = btrfs_check_data_free_space(inode, reserve_bytes, write_bytes);
if (ret == -ENOSPC &&
(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC))) {
@@ -2573,7 +2573,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* Make sure we have enough space before we do the
* allocation.
*/
- ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+ ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start, alloc_end - alloc_start);
if (ret)
return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a9f69a0..27b59b83 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -754,9 +754,6 @@ retry:
}
goto out_free;
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
/*
* here we're doing allocation and writeback of the
* compressed pages
@@ -981,10 +978,6 @@ static noinline int cow_file_range(struct inode *inode,
if (ret < 0)
goto out_unlock;
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
@@ -7037,10 +7030,6 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
return ERR_PTR(ret);
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
return em;
}
@@ -9595,10 +9584,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
break;
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index cd29173..17881ad8 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -72,7 +72,6 @@ struct btrfs_qgroup {
/*
* reservation tracking
*/
- u64 may_use;
u64 reserved;
/*
@@ -2383,67 +2382,6 @@ out:
return ret;
}
-int btrfs_qgroup_update_reserved_bytes(struct btrfs_fs_info *fs_info,
- u64 ref_root,
- u64 num_bytes,
- int sign)
-{
- struct btrfs_root *quota_root;
- struct btrfs_qgroup *qgroup;
- int ret = 0;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
-
- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
- return 0;
-
- if (num_bytes == 0)
- return 0;
-
- spin_lock(&fs_info->qgroup_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root)
- goto out;
-
- qgroup = find_qgroup_rb(fs_info, ref_root);
- if (!qgroup)
- goto out;
-
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- (uintptr_t)qgroup, GFP_ATOMIC);
- if (ret < 0)
- goto out;
-
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
- struct btrfs_qgroup_list *glist;
-
- qg = u64_to_ptr(unode->aux);
-
- qg->reserved += sign * num_bytes;
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- (uintptr_t)glist->group, GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
- }
-
-out:
- spin_unlock(&fs_info->qgroup_lock);
- return ret;
-}
-
-/*
- * reserve some space for a qgroup and all its parents. The reservation takes
- * place with start_transaction or dealloc_reserve, similar to ENOSPC
- * accounting. If not enough space is available, EDQUOT is returned.
- * We assume that the requested space is new for all qgroups.
- */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
struct btrfs_root *quota_root;
@@ -2486,14 +2424,14 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
qg = u64_to_ptr(unode->aux);
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
- qg->reserved + qg->may_use + (s64)qg->rfer + num_bytes >
+ qg->reserved + (s64)qg->rfer + num_bytes >
qg->max_rfer) {
ret = -EDQUOT;
goto out;
}
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
- qg->reserved + qg->may_use + (s64)qg->excl + num_bytes >
+ qg->reserved + (s64)qg->excl + num_bytes >
qg->max_excl) {
ret = -EDQUOT;
goto out;
@@ -2517,7 +2455,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
qg = u64_to_ptr(unode->aux);
- qg->may_use += num_bytes;
+ qg->reserved += num_bytes;
}
out:
@@ -2563,7 +2501,7 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
qg = u64_to_ptr(unode->aux);
- qg->may_use -= num_bytes;
+ qg->reserved -= num_bytes;
list_for_each_entry(glist, &qg->groups, next_group) {
ret = ulist_add(fs_info->qgroup_ulist,
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 64d49b8..c5242aa 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -94,10 +94,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
struct btrfs_qgroup_inherit *inherit);
-int btrfs_qgroup_update_reserved_bytes(struct btrfs_fs_info *fs_info,
- u64 ref_root,
- u64 num_bytes,
- int sign);
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 840a4eb..74b24b0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3027,7 +3027,7 @@ int prealloc_file_extent_cluster(struct inode *inode,
mutex_lock(&inode->i_mutex);
ret = btrfs_check_data_free_space(inode, cluster->end +
- 1 - cluster->start);
+ 1 - cluster->start, 0);
if (ret)
goto out;