author     Dongsheng Yang <dongsheng.yang@easystack.cn>  2018-03-26 10:22:55 -0400
committer  Ilya Dryomov <idryomov@gmail.com>             2018-04-16 09:38:40 +0200
commit     34f55d0b3a0a39c95134c0c89173893b846d4c80 (patch)
tree       c4421bd5aa961599e55a35a0151f18b528c9f8d6 /drivers/block/rbd.c
parent     2f18d46683cb3047c41229d57cf7c6e2ee48676f (diff)
rbd: support timeout in rbd_wait_state_locked()
Currently, rbd_wait_state_locked() will wait forever if we can't get our state locked. Example:

  rbd map --exclusive test1  --> /dev/rbd0
  rbd map test1              --> /dev/rbd1
  dd if=/dev/zero of=/dev/rbd1 bs=1M count=1  --> IO blocked

To avoid this problem, this patch introduces a timeout in rbd_wait_state_locked(): when the timeout is reached, rbd_wait_state_locked() returns an error. The patch also allows the user to set lock_timeout when mapping an rbd device.

Signed-off-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
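With lock_timeout set, the blocked dd in the example above fails instead of hanging forever. For illustration (the 60-second value is arbitrary, and this assumes rbd map's usual -o syntax for passing kernel map options):

  rbd map -o lock_timeout=60 test1  --> /dev/rbd1
  dd if=/dev/zero of=/dev/rbd1 bs=1M count=1  --> fails with ETIMEDOUT after 60 seconds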
Diffstat (limited to 'drivers/block/rbd.c')
-rw-r--r--  drivers/block/rbd.c | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f4b1b91..d5a5149 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
*/
enum {
Opt_queue_depth,
+ Opt_lock_timeout,
Opt_last_int,
/* int args above */
Opt_last_string,
@@ -745,6 +746,7 @@ enum {
static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
+ {Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
@@ -758,12 +760,14 @@ static match_table_t rbd_opts_tokens = {
struct rbd_options {
int queue_depth;
+ unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
bool exclusive;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT false
@@ -796,6 +800,14 @@ static int parse_rbd_opts_token(char *c, void *private)
}
rbd_opts->queue_depth = intval;
break;
+ case Opt_lock_timeout:
+ /* 0 is "wait forever" (i.e. infinite timeout) */
+ if (intval < 0 || intval > INT_MAX / 1000) {
+ pr_err("lock_timeout out of range\n");
+ return -EINVAL;
+ }
+ rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ break;
case Opt_read_only:
rbd_opts->read_only = true;
break;
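The range check above matters because lock_timeout is given in seconds and scaled by 1000 before msecs_to_jiffies(), so any intval above INT_MAX / 1000 would overflow the signed multiplication. A minimal userspace sketch of the same logic (HZ and msecs_to_jiffies_sketch() are stand-in assumptions for the kernel's tick rate and msecs_to_jiffies()):

#include <limits.h>
#include <stdio.h>

#define HZ 250	/* assumed tick rate; the real value is a kernel config choice */

/* Stand-in for the kernel's msecs_to_jiffies(), which rounds up. */
static unsigned long msecs_to_jiffies_sketch(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	int intval = 60;	/* as parsed from "lock_timeout=60" (seconds) */

	/* Same guard as the patch: reject negatives and any value whose
	 * intval * 1000 would overflow an int. */
	if (intval < 0 || intval > INT_MAX / 1000) {
		fprintf(stderr, "lock_timeout out of range\n");
		return 1;
	}
	printf("%d s -> %lu jiffies\n", intval,
	       msecs_to_jiffies_sketch((unsigned long)intval * 1000));
	return 0;
}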
@@ -3536,6 +3548,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
DEFINE_WAIT(wait);
+ unsigned long timeout;
int ret = 0;
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
@@ -3559,12 +3572,18 @@ static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
TASK_UNINTERRUPTIBLE);
up_read(&rbd_dev->lock_rwsem);
- schedule();
+ timeout = schedule_timeout(ceph_timeout_jiffies(
+ rbd_dev->opts->lock_timeout));
down_read(&rbd_dev->lock_rwsem);
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
ret = -EBLACKLISTED;
break;
}
+ if (!timeout) {
+ rbd_warn(rbd_dev, "timed out waiting for lock");
+ ret = -ETIMEDOUT;
+ break;
+ }
} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
finish_wait(&rbd_dev->lock_waitq, &wait);
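Two details make the hunk above work. schedule_timeout() returns the number of jiffies remaining, so timeout == 0 means the wait expired and the new -ETIMEDOUT path is taken. And ceph_timeout_jiffies() maps the RBD_LOCK_TIMEOUT_DEFAULT of 0 to MAX_SCHEDULE_TIMEOUT, preserving the old "wait forever" behaviour when no lock_timeout is given. The helper, as defined in include/linux/ceph/libceph.h at the time of this patch:

static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
	return timeout ?: MAX_SCHEDULE_TIMEOUT;
}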
@@ -5186,6 +5205,7 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;