author:    Joe Thornber <ejt@redhat.com>	2015-05-29 10:20:56 +0100
committer: Mike Snitzer <snitzer@redhat.com>	2015-06-11 17:13:01 -0400
commit:    fba10109a45d864bab98ae90dd63bcc2789352b3 (patch)
tree:      9cfe1ba112421a7b18515d7854836a1b76aacb0b /drivers/md
parent:    b61d9509628fea995196a96b4c1713fa67dade88 (diff)
dm cache: age and write back cache entries even without active IO
The policy tick() method is normally called from interrupt context. Both
the mq and smq policies do some bottom-half work for the tick method in
their map functions. However, if no IO is going through the cache, that
bottom-half work doesn't occur. With these policies this means recently
hit entries do not age and do not get written back as early as we'd like.

Fix this by introducing a new 'can_block' parameter to the tick() method.
When it is set, the bottom-half work occurs immediately. 'can_block' is
set when the tick method is called every second by the core target (not
in interrupt context).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
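For illustration only, here is a userspace analogue of the pattern this patch
extends. It is a sketch, not the kernel code: pthread primitives stand in for
the kernel's spin_lock_irqsave()/mutex pair, and the names tick_state and
tick_drain are made up. In the real policies the deferred counter is
mq->tick_protected and the drain is done by copy_tick() under mq->lock, as
the hunks below show.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tick_state {
	pthread_spinlock_t tick_lock; /* guards tick_protected; kernel uses an IRQ-safe spinlock */
	pthread_mutex_t lock;         /* the "big" policy lock; kernel's mq->lock */
	unsigned tick_protected;      /* bumped by tick(), even from "interrupt" context */
	unsigned tick;                /* the value the policy logic actually reads */
};

/* Drain the deferred count into the policy-visible tick (the copy_tick() role). */
static void tick_drain(struct tick_state *s)
{
	pthread_spin_lock(&s->tick_lock);
	s->tick = s->tick_protected;
	pthread_spin_unlock(&s->tick_lock);
}

/*
 * tick(): always cheap enough for interrupt context; when the caller can
 * block (process context), do the bottom-half work immediately so entries
 * age even when no IO is flowing through the cache.
 */
static void tick(struct tick_state *s, bool can_block)
{
	pthread_spin_lock(&s->tick_lock);
	s->tick_protected++;
	pthread_spin_unlock(&s->tick_lock);

	if (can_block) {
		pthread_mutex_lock(&s->lock);
		tick_drain(s);
		pthread_mutex_unlock(&s->lock);
	}
}

int main(void)
{
	struct tick_state s = { .tick_protected = 0, .tick = 0 };

	pthread_spin_init(&s.tick_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&s.lock, NULL);

	tick(&s, false); /* end_io path: the bottom half is deferred */
	tick(&s, true);  /* periodic waker: drain happens right away */
	printf("tick=%u tick_protected=%u\n", s.tick, s.tick_protected); /* tick=2 tick_protected=2 */
	return 0;
}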
Diffstat (limited to 'drivers/md')

 drivers/md/dm-cache-policy-internal.h | 4 ++--
 drivers/md/dm-cache-policy-mq.c       | 8 +++++++-
 drivers/md/dm-cache-policy-smq.c      | 8 +++++++-
 drivers/md/dm-cache-policy.h          | 4 ++--
 drivers/md/dm-cache-target.c          | 4 ++--
 5 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index ccbe852..2816018 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -83,10 +83,10 @@ static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
 	return p->residency(p);
 }
 
-static inline void policy_tick(struct dm_cache_policy *p)
+static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
 {
 	if (p->tick)
-		return p->tick(p);
+		return p->tick(p, can_block);
 }
 
 static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 084eec6..838665b 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1283,7 +1283,7 @@ static dm_cblock_t mq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void mq_tick(struct dm_cache_policy *p)
+static void mq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 	unsigned long flags;
@@ -1291,6 +1291,12 @@ static void mq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 static int mq_set_config_value(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 55a657f..66feb30 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1581,7 +1581,7 @@ static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void smq_tick(struct dm_cache_policy *p)
+static void smq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
@@ -1589,6 +1589,12 @@ static void smq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 /* Init the policy plugin interface function pointers. */
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 7470912..05db56e 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -200,10 +200,10 @@ struct dm_cache_policy {
 	 * Because of where we sit in the block layer, we can be asked to
 	 * map a lot of little bios that are all in the same block (no
 	 * queue merging has occurred). To stop the policy being fooled by
-	 * these the core target sends regular tick() calls to the policy.
+	 * these, the core target sends regular tick() calls to the policy.
 	 * The policy should only count an entry as hit once per tick.
 	 */
-	void (*tick)(struct dm_cache_policy *p);
+	void (*tick)(struct dm_cache_policy *p, bool can_block);
 
 	/*
 	 * Configuration.
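The "one hit per tick" rule in the comment above is easy to get wrong, so
here is a minimal illustration. The names (struct entry, record_hit) are
hypothetical, not the actual mq/smq data structures: an entry remembers the
tick of its last counted hit and ignores further hits within the same tick.

#include <stdio.h>

/* Hypothetical, simplified entry; the real policies keep richer per-entry state. */
struct entry {
	unsigned hit_count; /* what the promotion/aging logic looks at */
	unsigned last_tick; /* tick during which we last counted a hit */
};

/* Count at most one hit per tick, so many little bios to one block
 * (no queue merging) don't masquerade as many distinct hits. */
static void record_hit(struct entry *e, unsigned current_tick)
{
	if (e->last_tick != current_tick) {
		e->last_tick = current_tick;
		e->hit_count++;
	}
}

int main(void)
{
	struct entry e = { .hit_count = 0, .last_tick = 0 };
	unsigned tick = 1;

	for (int i = 0; i < 100; i++) /* a burst of bios within one tick... */
		record_hit(&e, tick);
	printf("after burst: %u hit(s)\n", e.hit_count); /* ...counts as 1 */

	record_hit(&e, ++tick); /* a hit in the next tick */
	printf("after next tick: %u hit(s)\n", e.hit_count); /* 2 */
	return 0;
}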
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5aad875..1b4e175 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2271,7 +2271,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
-	policy_tick(cache->policy);
+	policy_tick(cache->policy, true);
 	wake_worker(cache);
 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -3148,7 +3148,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
-		policy_tick(cache->policy);
+		policy_tick(cache->policy, false);
 		spin_lock_irqsave(&cache->lock, flags);
 		cache->need_tick_bio = true;
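Note the two call sites above: do_waker() runs off the cache's workqueue
every COMMIT_PERIOD, i.e. in process context where taking the policy mutex
is safe, so it passes can_block=true and aging/writeback proceeds even on an
idle cache. cache_end_io() completes bios, potentially in interrupt context,
so it passes false and only bumps the deferred counter.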