author     Ilya Dryomov <idryomov@gmail.com>  2017-06-15 16:30:55 +0200
committer  Ilya Dryomov <idryomov@gmail.com>  2017-07-07 17:25:16 +0200
commit     04c7d789e269c2b82bbd08106049a5a979cdb3fd (patch)
tree       db72e960611bfbcffe75d0a5a0efff8642cc3eb6 /net/ceph/osd_client.c
parent     a10bcb19ae02cea7d5e6650fbc2de3ced46b4e5d (diff)
libceph: make sure need_resend targets reflect latest map
Otherwise we may miss events like PG splits, pool deletions, etc. when we get multiple incremental maps at once. Because check_pool_dne() can now be fed an unlinked request, finish_request() needed to be taught to handle unlinked requests.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
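To make the failure mode concrete, here is a minimal userspace C model of it (all names here are hypothetical illustrations, not kernel code): a request caches the map epoch its target was computed against, and if several incremental maps are applied before the resend pass runs, that cached target can predate an event such as a pool deletion. The pre-pass added by this patch recomputes any stale target against the latest map before resending.

#include <stdbool.h>
#include <stdio.h>

struct model_map {
	unsigned int epoch;
	bool pool_exists;
};

struct model_req {
	unsigned int target_epoch;	/* epoch the target was computed at */
	bool pool_dne;			/* pool was gone as of target_epoch */
};

/* plays the role of calc_target(): refresh the target from the current map */
static void model_calc_target(const struct model_map *map,
			      struct model_req *req)
{
	req->target_epoch = map->epoch;
	req->pool_dne = !map->pool_exists;
}

int main(void)
{
	struct model_map map = { .epoch = 10, .pool_exists = true };
	struct model_req req;

	model_calc_target(&map, &req);	/* target computed at e10 */

	/* two incremental maps arrive at once */
	map.epoch = 11;			/* e11: no relevant change */
	map.epoch = 12;
	map.pool_exists = false;	/* e12: the pool is deleted */

	/* the pre-pass from this patch: recompute stale targets first */
	if (req.target_epoch < map.epoch)
		model_calc_target(&map, &req);

	if (req.pool_dne)
		printf("e%u: pool gone, fail the request instead of resending\n",
		       map.epoch);
	else
		printf("resend using target from e%u\n", req.target_epoch);
	return 0;
}

Without the epoch comparison, the request would be resent using its e10 target and the pool deletion in the e12 increment would be missed; this is why r_t.epoch is recorded in calc_target() and checked in the new kick_requests() pre-pass in the diff below.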
Diffstat (limited to 'net/ceph/osd_client.c')
-rw-r--r--  net/ceph/osd_client.c  33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 576101b..173ab9c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -386,6 +386,7 @@ static void target_copy(struct ceph_osd_request_target *dest,
dest->flags = src->flags;
dest->paused = src->paused;
+ dest->epoch = src->epoch;
dest->last_force_resend = src->last_force_resend;
dest->osd = src->osd;
@@ -1334,6 +1335,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
enum calc_target_result ct_res;
int ret;
+ t->epoch = osdc->osdmap->epoch;
pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
if (!pi) {
t->osd = CEPH_HOMELESS_OSD;
@@ -1720,10 +1722,11 @@ static void send_request(struct ceph_osd_request *req)
encode_request_partial(req, req->r_request);
- dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d flags 0x%x attempt %d\n",
+ dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
__func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
- req->r_t.spgid.shard, osd->o_osd, req->r_flags, req->r_attempts);
+ req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
+ req->r_attempts);
req->r_t.paused = false;
req->r_stamp = jiffies;
@@ -1863,13 +1866,12 @@ static void submit_request(struct ceph_osd_request *req, bool wrlocked)
static void finish_request(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
- struct ceph_osd *osd = req->r_osd;
- verify_osd_locked(osd);
+ WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
- WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
- unlink_request(osd, req);
+ if (req->r_osd)
+ unlink_request(req->r_osd, req);
atomic_dec(&osdc->num_requests);
/*
@@ -3356,8 +3358,25 @@ static void kick_requests(struct ceph_osd_client *osdc,
struct list_head *need_resend_linger)
{
struct ceph_osd_linger_request *lreq, *nlreq;
+ enum calc_target_result ct_res;
struct rb_node *n;
+ /* make sure need_resend targets reflect latest map */
+ for (n = rb_first(need_resend); n; ) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
+
+ n = rb_next(n);
+
+ if (req->r_t.epoch < osdc->osdmap->epoch) {
+ ct_res = calc_target(osdc, &req->r_t, NULL, false);
+ if (ct_res == CALC_TARGET_POOL_DNE) {
+ erase_request(need_resend, req);
+ check_pool_dne(req);
+ }
+ }
+ }
+
for (n = rb_first(need_resend); n; ) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
@@ -3366,8 +3385,6 @@ static void kick_requests(struct ceph_osd_client *osdc,
n = rb_next(n);
erase_request(need_resend, req); /* before link_request() */
- WARN_ON(req->r_osd);
- calc_target(osdc, &req->r_t, NULL, false);
osd = lookup_create_osd(osdc, req->r_t.osd, true);
link_request(osd, req);
if (!req->r_linger) {
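The net effect is a clear division of labor in kick_requests(): the new pre-pass brings every queued target up to the current osdmap epoch, so the second loop's per-request calc_target() call (and the WARN_ON(req->r_osd) next to it) could be dropped. A request whose pool vanished is erased from need_resend and handed to check_pool_dne() while it is not linked to any OSD, which is the path on which finish_request() can now see a NULL req->r_osd; hence the guarded unlink in the hunk above, in sketch form simply:

	if (req->r_osd)		/* may never have been (re)linked */
		unlink_request(req->r_osd, req);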