author    Alex Elder <elder@inktank.com>    2013-05-29 11:19:00 -0500
committer Sage Weil <sage@inktank.com>      2013-07-03 15:32:39 -0700
commit    08f75463c15e26e9d67a7c992ce7dd8964c6cbdd (patch)
tree      b1ec2df4acc47286381aa0085eeb5c76f03e628d /drivers/block
parent    3b5cf2a2f1746a253d56f54ffbb45170c90b1cbd (diff)
rbd: protect against duplicate client creation
If more than one rbd image has the same ceph cluster configuration (same options, same set of monitors, same keys) they normally share a single rbd client.

When an image is getting mapped, rbd looks to see if an existing client can be used, and creates a new one if not. The lookup and creation are not done under a common lock though, so mapping two images concurrently could lead to duplicate clients getting set up needlessly. This isn't a major problem, but it's wasteful and different from what's intended.

This patch fixes that by using the control mutex to protect both the lookup and (if needed) creation of the client. It was previously used just when creating.

This resolves:
    http://tracker.ceph.com/issues/3094

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
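The find-or-create pattern the patch adopts can be sketched in a few lines of user-space C. This is only an illustration, not the kernel code: client_get(), client_find(), client_create(), and cache_mutex are hypothetical stand-ins for rbd_get_client(), rbd_client_find(), rbd_client_create(), and ctl_mutex, and a pthread mutex stands in for the kernel mutex API.

/*
 * Illustrative user-space sketch of find-or-create under one lock.
 * All names here are hypothetical stand-ins, not the rbd code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct client {
	char config[64];	/* stands in for the ceph options */
	struct client *next;
};

static struct client *client_list;
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Look up an existing client with a matching configuration. */
static struct client *client_find(const char *config)
{
	struct client *c;

	for (c = client_list; c; c = c->next)
		if (!strcmp(c->config, config))
			return c;
	return NULL;
}

/* Create a new client and add it to the list; caller holds cache_mutex. */
static struct client *client_create(const char *config)
{
	struct client *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	snprintf(c->config, sizeof(c->config), "%s", config);
	c->next = client_list;
	client_list = c;
	return c;
}

/*
 * The point of the patch: the lookup and the (possible) creation
 * happen inside one critical section, so two concurrent callers with
 * the same configuration cannot both miss the lookup and each create
 * a duplicate client.
 */
static struct client *client_get(const char *config)
{
	struct client *c;

	pthread_mutex_lock(&cache_mutex);
	c = client_find(config);
	if (!c)
		c = client_create(config);
	pthread_mutex_unlock(&cache_mutex);
	return c;
}

int main(void)
{
	struct client *a = client_get("mon=1.2.3.4");
	struct client *b = client_get("mon=1.2.3.4");

	printf("same client: %s\n", a == b ? "yes" : "no");
	return 0;
}

Because the single mutex covers both steps, two tasks mapping images with identical options cannot both miss the lookup and each create a client. Before the patch, ctl_mutex was taken only inside rbd_client_create(), leaving exactly that window between the find and the create.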
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/rbd.c  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d4902c5..fd2795d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -520,7 +520,7 @@ static const struct block_device_operations rbd_bd_ops = {
 
 /*
  * Initialize an rbd client instance.  Success or not, this function
- * consumes ceph_opts.
+ * consumes ceph_opts.  Caller holds ctl_mutex.
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -535,30 +535,25 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 	kref_init(&rbdc->kref);
 	INIT_LIST_HEAD(&rbdc->node);
 
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
 	if (IS_ERR(rbdc->client))
-		goto out_mutex;
+		goto out_rbdc;
 	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */
 
 	ret = ceph_open_session(rbdc->client);
 	if (ret < 0)
-		goto out_err;
+		goto out_client;
 
 	spin_lock(&rbd_client_list_lock);
 	list_add_tail(&rbdc->node, &rbd_client_list);
 	spin_unlock(&rbd_client_list_lock);
 
-	mutex_unlock(&ctl_mutex);
 	dout("%s: rbdc %p\n", __func__, rbdc);
 
 	return rbdc;
-
-out_err:
+out_client:
 	ceph_destroy_client(rbdc->client);
-out_mutex:
-	mutex_unlock(&ctl_mutex);
+out_rbdc:
 	kfree(rbdc);
 out_opt:
 	if (ceph_opts)
@@ -682,11 +677,13 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
 	struct rbd_client *rbdc;
 
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	rbdc = rbd_client_find(ceph_opts);
 	if (rbdc)	/* using an existing client */
 		ceph_destroy_options(ceph_opts);
 	else
 		rbdc = rbd_client_create(ceph_opts);
+	mutex_unlock(&ctl_mutex);
 
 	return rbdc;
 }