author      Alex Elder <elder@inktank.com>    2012-10-08 20:37:30 -0700
committer   Alex Elder <elder@inktank.com>    2012-10-09 22:00:44 -0700
commit      802c6d967fbdcd2cbc91b917425661bb8bbfaade (patch)
tree        a5a64ac537fb69ecb1edd55db8346089fecb9f84 /net
parent      8618e30bc14b06bfafa0f164cca7b0e06451f88a (diff)
rbd: define common queue_con_delay()
This patch defines a single function, queue_con_delay() to call
queue_delayed_work() for a connection.  It basically generalizes what
was previously queue_con() by adding the delay argument.  queue_con()
is now a simple helper that passes 0 for its delay.

queue_con_delay() returns 0 if it queued work or an errno if it did
not for some reason.

If con_work() finds the BACKOFF flag set for a connection, it now
calls queue_con_delay() to handle arranging to start again after a
delay.

Note about connection reference counts:  con_work() only ever gets
called as a work item function.  At the time that work is scheduled,
a reference to the connection is acquired, and the corresponding
con_work() call is then responsible for dropping that reference
before it returns.

Previously, the backoff handling inside con_work() silently handed
off its reference to delayed work it scheduled.  Now that
queue_con_delay() is used, a new reference is acquired for the
newly-scheduled work, and the original reference is dropped by the
con->ops->put() call at the end of the function.

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
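For reference, here is a sketch of the two code paths as they look after
this patch, assembled from the hunks below and annotated with comments
marking where the connection reference is taken and dropped.  It is meant
to illustrate the reference-counting contract described above, not to be
a standalone or verbatim copy of net/ceph/messenger.c.

/* Queue work on a connection after the given delay (sketch of the new helper). */
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
        /* Take a reference on behalf of the work item being scheduled. */
        if (!con->ops->get(con)) {
                dout("%s %p ref count 0\n", __func__, con);
                return -ENOENT;
        }

        if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
                /* Work was already queued; drop the reference we just took. */
                dout("%s %p - already queued\n", __func__, con);
                con->ops->put(con);
                return -EBUSY;
        }

        dout("%s %p %lu\n", __func__, con, delay);
        return 0;
}

/* queue_con() becomes a trivial wrapper that requests no delay. */
static void queue_con(struct ceph_connection *con)
{
        (void) queue_con_delay(con, 0);
}

/*
 * In con_work(), backing off now goes through the same helper.  The newly
 * scheduled work owns the reference taken inside queue_con_delay(); the
 * reference held by this con_work() invocation is dropped by the
 * con->ops->put() at the end of the function, like any other exit path.
 */
if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
        dout("con_work %p backing off\n", con);
        ret = queue_con_delay(con, round_jiffies_relative(con->delay));
        if (ret) {
                dout("con_work %p FAILED to back off %lu\n", con, con->delay);
                BUG_ON(ret == -ENOENT); /* con_work() holds a ref, so get() cannot have failed */
                set_bit(CON_FLAG_BACKOFF, &con->flags);
        }
        goto done;
}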
Diffstat (limited to 'net')
-rw-r--r--   net/ceph/messenger.c   38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 973c16c..66f6f56 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2244,22 +2244,33 @@ bad_tag:
 /*
- * Atomically queue work on a connection. Bump @con reference to
- * avoid races with connection teardown.
+ * Atomically queue work on a connection after the specified delay.
+ * Bump @con reference to avoid races with connection teardown.
+ * Returns 0 if work was queued, or an error code otherwise.
  */
-static void queue_con(struct ceph_connection *con)
+static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
 {
         if (!con->ops->get(con)) {
-                dout("queue_con %p ref count 0\n", con);
-                return;
+                dout("%s %p ref count 0\n", __func__, con);
+
+                return -ENOENT;
         }
 
-        if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
-                dout("queue_con %p - already queued\n", con);
+        if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
+                dout("%s %p - already queued\n", __func__, con);
                 con->ops->put(con);
-        } else {
-                dout("queue_con %p\n", con);
+
+                return -EBUSY;
         }
+
+        dout("%s %p %lu\n", __func__, con, delay);
+
+        return 0;
+}
+
+static void queue_con(struct ceph_connection *con)
+{
+        (void) queue_con_delay(con, 0);
 }
 
 /*
@@ -2294,14 +2305,11 @@ restart:
         if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
                 dout("con_work %p backing off\n", con);
-                if (queue_delayed_work(ceph_msgr_wq, &con->work,
-                                       round_jiffies_relative(con->delay))) {
-                        dout("con_work %p backoff %lu\n", con, con->delay);
-                        mutex_unlock(&con->mutex);
-                        return;
-                } else {
+                ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+                if (ret) {
                         dout("con_work %p FAILED to back off %lu\n", con,
                              con->delay);
+                        BUG_ON(ret == -ENOENT);
                         set_bit(CON_FLAG_BACKOFF, &con->flags);
                 }
                 goto done;