author	Kent Overstreet <kmo@daterainc.com>	2013-11-01 18:03:08 -0700
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-10 21:56:10 -0800
commit	faadf0c96547ec8277ad0abd6959f2ef48522f31 (patch)
tree	f75304e875f24f583bd28c229d8d6f82290f55d2 /drivers/md
parent	b54d6934da7857f87b092df9b77dc1f42818ba94 (diff)
bcache: Drop some closure stuff
With the recent bcache refactoring, some of the closure code isn't needed anymore.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/btree.c	6
-rw-r--r--	drivers/md/bcache/closure.c	103
-rw-r--r--	drivers/md/bcache/closure.h	181
3 files changed, 40 insertions(+), 250 deletions(-)
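For orientation before the diff itself: a bcache closure is essentially a refcount plus an optional continuation, and closure_put() runs the continuation when the count hits zero (see closure_put_after_sub() below). The following is a toy user-space model of just that behaviour; none of it is kernel code, and the names are invented for illustration:

	#include <stdatomic.h>
	#include <stdio.h>

	struct closure {
		atomic_int remaining;          /* low bits act as the refcount */
		void (*fn)(struct closure *);  /* continuation to run at zero */
	};

	static void closure_put_model(struct closure *cl)
	{
		/* Mirrors closure_put()/closure_put_after_sub(): the thread
		 * that drops the last reference runs the continuation. */
		if (atomic_fetch_sub(&cl->remaining, 1) == 1 && cl->fn)
			cl->fn(cl);
	}

	static void done(struct closure *cl)
	{
		puts("last reference dropped");
	}

	int main(void)
	{
		struct closure cl = { .fn = done };
		atomic_init(&cl.remaining, 2);  /* two outstanding references */

		closure_put_model(&cl);         /* one ref left: nothing happens */
		closure_put_model(&cl);         /* hits zero: runs done() */
		return 0;
	}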
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index cb1a490..3e0c901 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -407,7 +407,7 @@ static void do_btree_node_write(struct btree *b)
b->bio = bch_bbio_alloc(b->c);
b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = &b->io.cl;
+ b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
@@ -672,8 +672,8 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
}
/* wait for any in flight btree write */
- closure_wait_event_sync(&b->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ closure_wait_event(&b->io.wait, &cl,
+ atomic_read(&b->io.cl.remaining) == -1);
return 0;
}
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba201..dfff241 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,17 +11,6 @@
#include "closure.h"
-void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
#define CL_FIELD(type, field) \
case TYPE_ ## type: \
return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
switch (cl->type) {
CL_FIELD(closure_with_waitlist, wait);
- CL_FIELD(closure_with_waitlist_and_timer, wait);
- default:
- return NULL;
- }
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_timer, timer);
- CL_FIELD(closure_with_waitlist_and_timer, timer);
default:
return NULL;
}
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
int r = flags & CLOSURE_REMAINING_MASK;
BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
/* Must deliver precisely one wakeup */
if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
- /* CLOSURE_BLOCKING might be set - clear it */
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
closure_queue(cl);
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
static void set_waiting(struct closure *cl, unsigned long f)
{
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
return true;
}
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
/**
* closure_sync() - sleep until a closure has nothing left to wait on
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
__closure_end_sleep(cl);
}
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
/**
* closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
CLOSURE_REMAINING_INITIALIZER) != -1)
return false;
- closure_set_ret_ip(cl);
-
smp_mb();
+
cl->parent = parent;
if (parent)
closure_get(parent);
+ closure_set_ret_ip(cl);
closure_debug_create(cl);
return true;
}
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
if (closure_trylock(cl, parent))
return;
- closure_wait_event_sync(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
+ closure_wait_event(wait_list, &wait,
+ atomic_read(&cl->remaining) == -1);
}
}
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
- struct closure *cl = (struct closure *) data;
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
- struct timer_list *timer = closure_timer(cl);
-
- init_timer(timer);
- timer->data = (unsigned long) cl;
- timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer)
-{
- if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
- return false;
-
- BUG_ON(timer_pending(timer));
-
- timer->expires = jiffies + delay;
-
- atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
- add_timer(timer);
- return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer_sync(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
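The timer helpers removed above all revolve around one protocol: __closure_delay() adds CLOSURE_TIMER plus one reference to cl->remaining in a single atomic add, and exactly one of the timer function or a successful flush gives the pair back via closure_sub(cl, CLOSURE_TIMER + 1). A standalone arithmetic sketch of that pairing, using the pre-patch flag values from the closure.h hunk later in this patch (plain C, not kernel code):

	#include <stdio.h>

	#define CLOSURE_TIMER           (1 << 27)        /* pre-patch flag value */
	#define CLOSURE_REMAINING_MASK  ((1 << 19) - 1)  /* pre-patch CLOSURE_BITS_START - 1 */

	int main(void)
	{
		int remaining = 1;  /* one reference held, no flags set */

		/* __closure_delay(): set the flag and take a ref in one add. */
		remaining += CLOSURE_TIMER + 1;
		printf("timer: %d refs: %d\n",
		       !!(remaining & CLOSURE_TIMER),
		       remaining & CLOSURE_REMAINING_MASK);  /* timer: 1 refs: 2 */

		/* Timer fired, or del_timer() won: give both back at once. */
		remaining -= CLOSURE_TIMER + 1;
		printf("timer: %d refs: %d\n",
		       !!(remaining & CLOSURE_TIMER),
		       remaining & CLOSURE_REMAINING_MASK);  /* timer: 0 refs: 1 */
		return 0;
	}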
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s%s%s\n",
+ seq_printf(f, "%s%s%s%s\n",
test_bit(WORK_STRUCT_PENDING,
work_data_bits(&cl->work)) ? "Q" : "",
r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_BLOCKING ? "B" : "",
r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "",
- r & CLOSURE_TIMER ? "T" : "");
+ r & CLOSURE_SLEEPING ? "Sl" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ab011f0..9762f1b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -155,21 +155,6 @@
* delayed_work embeds a work item and a timer_list. The important thing is, use
* it exactly like you would a regular closure and closure_put() will magically
* handle everything for you.
- *
- * We've got closures that embed timers, too. They're called, appropriately
- * enough:
- * struct closure_with_timer;
- *
- * This gives you access to closure_delay(). It takes a refcount for a specified
- * number of jiffies - you could then call closure_sync() (for a slightly
- * convoluted version of msleep()) or continue_at() - which gives you the same
- * effect as using a delayed work item, except you can reuse the work_struct
- * already embedded in struct closure.
- *
- * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
- * probably expect, if you happen to need the features of both. (You don't
- * really want to know how all this is implemented, but if I've done my job
- * right you shouldn't have to care).
*/
struct closure;
@@ -182,16 +167,11 @@ struct closure_waitlist {
enum closure_type {
TYPE_closure = 0,
TYPE_closure_with_waitlist = 1,
- TYPE_closure_with_timer = 2,
- TYPE_closure_with_waitlist_and_timer = 3,
- MAX_CLOSURE_TYPE = 3,
+ MAX_CLOSURE_TYPE = 1,
};
enum closure_state {
/*
- * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
- * waiting asynchronously
- *
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
@@ -200,10 +180,6 @@ enum closure_state {
* - indicates that cl->task is valid and closure_put() may wake it up.
* Only set or cleared by the thread that owns the closure.
*
- * CLOSURE_TIMER: Analogous to CLOSURE_WAITING, indicates that a closure
- * has an outstanding timer. Must be set by the thread that owns the
- * closure, and cleared by the timer function when the timer goes off.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -218,19 +194,17 @@ enum closure_state {
* closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 19),
- CLOSURE_DESTRUCTOR = (1 << 19),
- CLOSURE_BLOCKING = (1 << 21),
- CLOSURE_WAITING = (1 << 23),
- CLOSURE_SLEEPING = (1 << 25),
- CLOSURE_TIMER = (1 << 27),
+ CLOSURE_BITS_START = (1 << 23),
+ CLOSURE_DESTRUCTOR = (1 << 23),
+ CLOSURE_WAITING = (1 << 25),
+ CLOSURE_SLEEPING = (1 << 27),
CLOSURE_RUNNING = (1 << 29),
CLOSURE_STACK = (1 << 31),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \
- CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
+ CLOSURE_RUNNING|CLOSURE_STACK) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
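The new layout packs the refcount into the low 23 bits and spreads the state flags across alternating high bits; the bit directly above each flag acts as a guard that closure_put_after_sub() BUG_ON()s, catching a flag that over- or underflows into its neighbour. A user-space sketch of the masks (values copied from the hunk above, but widened to unsigned long so the shifted guard bits stay representable):

	#include <stdio.h>

	/* Post-patch values from closure.h, as unsigned long for this sketch. */
	#define CLOSURE_BITS_START	(1UL << 23)
	#define CLOSURE_DESTRUCTOR	(1UL << 23)
	#define CLOSURE_WAITING		(1UL << 25)
	#define CLOSURE_SLEEPING	(1UL << 27)
	#define CLOSURE_RUNNING		(1UL << 29)
	#define CLOSURE_STACK		(1UL << 31)

	#define CLOSURE_GUARD_MASK \
		((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
		  CLOSURE_RUNNING|CLOSURE_STACK) << 1)
	#define CLOSURE_REMAINING_MASK	(CLOSURE_BITS_START - 1)

	int main(void)
	{
		unsigned long r = 1 | CLOSURE_RUNNING;  /* CLOSURE_REMAINING_INITIALIZER */

		printf("refcount:    %lu\n", r & CLOSURE_REMAINING_MASK);  /* 1 */
		printf("guard clear: %d\n", !(r & CLOSURE_GUARD_MASK));    /* 1 */

		/* A flag "overflowing" carries into its guard bit: adding
		 * CLOSURE_RUNNING again sets bit 30, inside the guard mask. */
		r += CLOSURE_RUNNING;
		printf("guard clear: %d\n", !(r & CLOSURE_GUARD_MASK));    /* 0 */
		return 0;
	}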
@@ -268,17 +242,6 @@ struct closure_with_waitlist {
struct closure_waitlist wait;
};
-struct closure_with_timer {
- struct closure cl;
- struct timer_list timer;
-};
-
-struct closure_with_waitlist_and_timer {
- struct closure cl;
- struct closure_waitlist wait;
- struct timer_list timer;
-};
-
extern unsigned invalid_closure_type(void);
#define __CLOSURE_TYPE(cl, _t) \
@@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void);
( \
__CLOSURE_TYPE(cl, closure) \
__CLOSURE_TYPE(cl, closure_with_waitlist) \
- __CLOSURE_TYPE(cl, closure_with_timer) \
- __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \
invalid_closure_type() \
)
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
@@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list);
-void do_closure_timer_init(struct closure *cl);
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer);
-void __closure_flush(struct closure *cl, struct timer_list *timer);
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl)
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
-static inline bool closure_is_stopped(struct closure *cl)
-{
- return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
static inline bool closure_is_unlocked(struct closure *cl)
{
return atomic_read(&cl->remaining) == -1;
@@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl)
static inline void do_closure_init(struct closure *cl, struct closure *parent,
bool running)
{
- switch (cl->type) {
- case TYPE_closure_with_timer:
- case TYPE_closure_with_waitlist_and_timer:
- do_closure_timer_init(cl);
- default:
- break;
- }
-
cl->parent = parent;
if (parent)
closure_get(parent);
@@ -429,8 +370,7 @@ do { \
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
- CLOSURE_BLOCKING|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
@@ -461,24 +401,6 @@ do { \
#define closure_lock(cl, parent) \
__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
-/**
- * closure_delay() - delay some number of jiffies
- * @cl: the closure that will sleep
- * @delay: the delay in jiffies
- *
- * Takes a refcount on @cl which will be released after @delay jiffies; this may
- * be used to have a function run after a delay with continue_at(), or
- * closure_sync() may be used for a convoluted version of msleep().
- */
-#define closure_delay(cl, delay) \
- __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
-
-#define closure_flush(cl) \
- __closure_flush(__to_internal_closure(cl), &(cl)->timer)
-
-#define closure_flush_sync(cl) \
- __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
-
static inline void __closure_end_sleep(struct closure *cl)
{
__set_current_state(TASK_RUNNING);
@@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl)
}
/**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
- return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
- if (!closure_blocking(cl))
- atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
- if (closure_blocking(cl))
- atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/**
* closure_wake_up() - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
@@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* refcount on our closure. If this was a stack allocated closure, that would be
* bad.
*/
-#define __closure_wait_event(list, cl, condition, _block) \
+#define closure_wait_event(list, cl, condition) \
({ \
- bool block = _block; \
typeof(condition) ret; \
\
while (1) { \
ret = (condition); \
if (ret) { \
__closure_wake_up(list); \
- if (block) \
- closure_sync(cl); \
- \
+ closure_sync(cl); \
break; \
} \
\
- if (block) \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) { \
- if (!block) \
- break; \
+ __closure_start_sleep(cl); \
\
+ if (!closure_wait(list, cl)) \
schedule(); \
- } \
} \
\
ret; \
})
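With the blocking flag gone, the macro above always has wait_event()-like semantics: the condition is re-checked around every sleep, so a wakeup arriving between the check and the sleep cannot be lost. A user-space analogue of that check-then-sleep loop (pthreads; purely illustrative, not the kernel mechanism):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
	static bool ready;

	/* Analogue of closure_wait_event(): loop, re-testing the condition. */
	static void wait_until_ready(void)
	{
		pthread_mutex_lock(&lock);
		while (!ready)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	/* Analogue of "update state, then closure_wake_up(list)". */
	static void *make_ready(void *arg)
	{
		pthread_mutex_lock(&lock);
		ready = true;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, make_ready, NULL);
		wait_until_ready();
		pthread_join(&t, NULL);
		return 0;
	}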
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list: the wait list to wait on
- * @cl: the closure that is doing the waiting
- * @condition: a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition) \
- __closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition) \
- __closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition) \
- __closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
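Note that closure_queue() itself survives the patch: it moves out of closure.c (where it was exported) and becomes the static inline above. A plausible reading of the move is that, with the timer plumbing gone, the remaining dispatch — queue on cl->wq when one is set, otherwise call cl->fn directly — is small enough to inline at every continue_at() call site.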
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)