path: root/fs/btrfs/raid56.c
author	Qu Wenruo <quwenruo@cn.fujitsu.com>	2014-02-28 10:46:11 +0800
committer	Josef Bacik <jbacik@fb.com>	2014-03-10 15:17:09 -0400
commit	d05a33ac265c62d4be35788dd978b2665033f077 (patch)
tree	f71e59e7c3121a5e75f93282c104bea8267d6677 /fs/btrfs/raid56.c
parent	fccb5d86d8f52161e013025ccf3101d8fab99a32 (diff)
btrfs: Replace fs_info->rmw_workers workqueue with btrfs_workqueue.
Replace the fs_info->rmw_workers with the newly created btrfs_workqueue.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
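For context, the hunks below reduce to one pattern change wherever raid56 schedules work: instead of filling in the old btrfs_worker fields by hand and calling btrfs_queue_worker(), the code calls the btrfs_workqueue helpers. The sketch below is only an illustration extracted from this diff, not complete kernel code; the "_old"/"_new" names are made up for side-by-side comparison, and btrfs_raid_bio, rmw_work, btrfs_init_work and btrfs_queue_work are as defined in this kernel tree.

/* Before: open-coded setup on the old btrfs_worker thread-pool API,
 * where rmw_workers was an embedded struct in fs_info. */
static void async_rmw_stripe_old(struct btrfs_raid_bio *rbio)
{
	rbio->work.flags = 0;
	rbio->work.func = rmw_work;
	btrfs_queue_worker(&rbio->fs_info->rmw_workers, &rbio->work);
}

/* After: btrfs_init_work() sets up the work item and btrfs_queue_work()
 * submits it; rmw_workers is now a pointer to a btrfs_workqueue.
 * The two NULL arguments are the ordered_func/ordered_free callbacks,
 * which raid56 does not use here. */
static void async_rmw_stripe_new(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

The same substitution is applied to async_read_rebuild() and btrfs_raid_unplug(), and the work-item type and callback signatures switch from struct btrfs_work to struct btrfs_work_struct accordingly.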
Diffstat (limited to 'fs/btrfs/raid56.c')
-rw-r--r--	fs/btrfs/raid56.c	35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24ac218..5afa564 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -87,7 +87,7 @@ struct btrfs_raid_bio {
/*
* for scheduling work in the helper threads
*/
- struct btrfs_work work;
+ struct btrfs_work_struct work;
/*
* bio list and bio_list_lock are used
@@ -166,8 +166,8 @@ struct btrfs_raid_bio {
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
-static void rmw_work(struct btrfs_work *work);
-static void read_rebuild_work(struct btrfs_work *work);
+static void rmw_work(struct btrfs_work_struct *work);
+static void read_rebuild_work(struct btrfs_work_struct *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
@@ -1416,20 +1416,18 @@ cleanup:
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
- rbio->work.flags = 0;
- rbio->work.func = rmw_work;
+ btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
- btrfs_queue_worker(&rbio->fs_info->rmw_workers,
- &rbio->work);
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
}
static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
- rbio->work.flags = 0;
- rbio->work.func = read_rebuild_work;
+ btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
- btrfs_queue_worker(&rbio->fs_info->rmw_workers,
- &rbio->work);
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
}
/*
@@ -1590,7 +1588,7 @@ struct btrfs_plug_cb {
struct blk_plug_cb cb;
struct btrfs_fs_info *info;
struct list_head rbio_list;
- struct btrfs_work work;
+ struct btrfs_work_struct work;
};
/*
@@ -1654,7 +1652,7 @@ static void run_plug(struct btrfs_plug_cb *plug)
* if the unplug comes from schedule, we have to push the
* work off to a helper thread
*/
-static void unplug_work(struct btrfs_work *work)
+static void unplug_work(struct btrfs_work_struct *work)
{
struct btrfs_plug_cb *plug;
plug = container_of(work, struct btrfs_plug_cb, work);
@@ -1667,10 +1665,9 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (from_schedule) {
- plug->work.flags = 0;
- plug->work.func = unplug_work;
- btrfs_queue_worker(&plug->info->rmw_workers,
- &plug->work);
+ btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
+ btrfs_queue_work(plug->info->rmw_workers,
+ &plug->work);
return;
}
run_plug(plug);
@@ -2082,7 +2079,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
}
-static void rmw_work(struct btrfs_work *work)
+static void rmw_work(struct btrfs_work_struct *work)
{
struct btrfs_raid_bio *rbio;
@@ -2090,7 +2087,7 @@ static void rmw_work(struct btrfs_work *work)
raid56_rmw_stripe(rbio);
}
-static void read_rebuild_work(struct btrfs_work *work)
+static void read_rebuild_work(struct btrfs_work_struct *work)
{
struct btrfs_raid_bio *rbio;