author    Jeff Mahoney <jeffm@suse.com>        2016-06-22 18:54:23 -0400
committer David Sterba <dsterba@suse.com>      2016-12-06 16:06:59 +0100
commit    0b246afa62b0cf5b09d078121f543135f28492ad (patch)
tree      200ad296d09f1b2f5329658c8de81bc625007ace /fs/btrfs/free-space-cache.c
parent    6202df6921494f29308307e0ae6f567c2ab2ba19 (diff)
btrfs: root->fs_info cleanup, add fs_info convenience variables
In routines where someptr->fs_info is referenced multiple times, we
introduce a convenience variable. This makes the code considerably more
readable.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
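The cleanup is mechanical: wherever a function dereferences someptr->fs_info
more than once, the pointer is hoisted into a local fs_info variable at the
top of the function, and every later use goes through that local. Below is a
minimal, self-contained sketch of the before/after shape of this refactor;
the struct layouts and function names are hypothetical stand-ins, not the
real btrfs definitions.

	/*
	 * Sketch of the convenience-variable pattern. The types here are
	 * simplified stand-ins for struct btrfs_root / struct btrfs_fs_info.
	 */
	#include <stdio.h>

	struct fs_info {
		unsigned int sectorsize;
		const char *sb_id;
	};

	struct root {
		struct fs_info *fs_info;
	};

	/* Before: every use repeats the root->fs_info dereference. */
	static void report_before(struct root *root)
	{
		printf("sb=%s sectorsize=%u\n",
		       root->fs_info->sb_id, root->fs_info->sectorsize);
	}

	/* After: hoist the pointer once; later uses are shorter and clearer. */
	static void report_after(struct root *root)
	{
		struct fs_info *fs_info = root->fs_info;

		printf("sb=%s sectorsize=%u\n",
		       fs_info->sb_id, fs_info->sectorsize);
	}

	int main(void)
	{
		struct fs_info fi = { .sectorsize = 4096, .sb_id = "btrfs-demo" };
		struct root r = { .fs_info = &fi };

		report_before(&r);
		report_after(&r);
		return 0;
	}

Note that in the hunks below the local is initialized from whatever the
function already has in scope: root->fs_info in most routines,
block_group->fs_info where only a block group is available, and
btrfs_sb(inode->i_sb) where the function starts from an inode.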
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--  fs/btrfs/free-space-cache.c  |  89
1 file changed, 51 insertions(+), 38 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8424617..e937636 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -47,6 +47,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
@@ -74,7 +75,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path);
- inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode))
return inode;
if (is_bad_inode(inode)) {
@@ -94,6 +95,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
*block_group, struct btrfs_path *path)
{
struct inode *inode = NULL;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
spin_lock(&block_group->lock);
@@ -110,8 +112,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) {
- btrfs_info(root->fs_info,
- "Old style space inode found, converting.");
+ btrfs_info(fs_info, "Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR;
@@ -206,12 +207,13 @@ int create_free_space_inode(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
struct btrfs_block_rsv *rsv)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 needed_bytes;
int ret;
/* 1 for slack space, 1 for updating the inode */
- needed_bytes = btrfs_calc_trunc_metadata_size(root->fs_info, 1) +
- btrfs_calc_trans_metadata_size(root->fs_info, 1);
+ needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
+ btrfs_calc_trans_metadata_size(fs_info, 1);
spin_lock(&rsv->lock);
if (rsv->reserved < needed_bytes)
@@ -667,6 +669,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl;
@@ -706,16 +709,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
btrfs_release_path(path);
if (!BTRFS_I(inode)->generation) {
- btrfs_info(root->fs_info,
+ btrfs_info(fs_info,
"The free space cache file (%llu) is invalid. skip it\n",
offset);
return 0;
}
if (BTRFS_I(inode)->generation != generation) {
- btrfs_err(root->fs_info,
- "free space inode generation (%llu) did not match free space cache generation (%llu)",
- BTRFS_I(inode)->generation, generation);
+ btrfs_err(fs_info,
+ "free space inode generation (%llu) did not match free space cache generation (%llu)",
+ BTRFS_I(inode)->generation, generation);
return 0;
}
@@ -764,7 +767,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
if (ret) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@@ -784,7 +787,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@@ -1036,6 +1039,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
struct btrfs_io_ctl *io_ctl,
int *entries)
{
+ struct btrfs_fs_info *fs_info;
u64 start, extent_start, extent_end, len;
struct extent_io_tree *unpin = NULL;
int ret;
@@ -1043,6 +1047,8 @@ write_pinned_extent_entries(struct btrfs_root *root,
if (!block_group)
return 0;
+ fs_info = block_group->fs_info;
+
/*
* We want to add any pinned extents to our free space cache
* so we don't leak the space
@@ -1050,7 +1056,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
* We shouldn't have switched the pinned extents yet so this is the
* right one
*/
- unpin = root->fs_info->pinned_extents;
+ unpin = fs_info->pinned_extents;
start = block_group->key.objectid;
@@ -1141,12 +1147,15 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
{
int ret;
struct inode *inode = io_ctl->inode;
+ struct btrfs_fs_info *fs_info;
if (!inode)
return 0;
+ fs_info = btrfs_sb(inode->i_sb);
+
if (block_group)
- root = root->fs_info->tree_root;
+ root = fs_info->tree_root;
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode);
@@ -1163,9 +1172,9 @@ out:
BTRFS_I(inode)->generation = 0;
if (block_group) {
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
}
}
@@ -1376,9 +1385,9 @@ int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
path, block_group->key.objectid);
if (ret) {
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free space cache for block group %llu",
- block_group->key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1965,11 +1974,11 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_block_group_cache *block_group = ctl->private;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
- block_group))
+ if (btrfs_should_fragment_free_space(fs_info->extent_root, block_group))
forced = true;
#endif
@@ -1985,7 +1994,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* of cache left then go ahead an dadd them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
- if (info->bytes <= block_group->fs_info->sectorsize * 4) {
+ if (info->bytes <= fs_info->sectorsize * 4) {
if (ctl->free_extents * 2 <= ctl->extents_thresh)
return false;
} else {
@@ -2444,6 +2453,7 @@ out:
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct rb_node *n;
@@ -2453,23 +2463,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
count++;
- btrfs_crit(block_group->fs_info,
- "entry offset %llu, bytes %llu, bitmap %s",
+ btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
- btrfs_info(block_group->fs_info, "block group has cluster?: %s",
+ btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
- btrfs_info(block_group->fs_info,
+ btrfs_info(fs_info,
"%d blocks of free space at or bigger than bytes is", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
- ctl->unit = block_group->fs_info->sectorsize;
+ ctl->unit = fs_info->sectorsize;
ctl->start = block_group->key.objectid;
ctl->private = block_group;
ctl->op = &free_space_op;
@@ -3014,6 +3024,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space *entry, *tmp;
LIST_HEAD(bitmaps);
u64 min_bytes;
@@ -3026,14 +3037,14 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For
* data, keep it dense.
*/
- if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
+ if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
- min_bytes = block_group->fs_info->sectorsize;
+ min_bytes = fs_info->sectorsize;
} else {
cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
- min_bytes = block_group->fs_info->sectorsize;
+ min_bytes = fs_info->sectorsize;
}
spin_lock(&ctl->tree_lock);
@@ -3318,6 +3329,7 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;
struct extent_map *em;
bool cleanup;
@@ -3328,8 +3340,8 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);
if (cleanup) {
- lock_chunks(block_group->fs_info);
- em_tree = &block_group->fs_info->mapping_tree.map_tree;
+ lock_chunks(fs_info);
+ em_tree = &fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid,
1);
@@ -3340,7 +3352,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
*/
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
- unlock_chunks(block_group->fs_info);
+ unlock_chunks(fs_info);
/* once for us and once for the tree */
free_extent_map(em);
@@ -3470,7 +3482,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
/*
@@ -3509,12 +3521,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
int ret;
struct btrfs_io_ctl io_ctl;
bool release_metadata = true;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
memset(&io_ctl, 0, sizeof(io_ctl));
@@ -3535,9 +3548,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
if (release_metadata)
btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
- btrfs_err(root->fs_info,
- "failed to write free ino cache for root %llu",
- root->root_key.objectid);
+ btrfs_err(fs_info,
+ "failed to write free ino cache for root %llu",
+ root->root_key.objectid);
#endif
}