author		Kent Overstreet <kmo@daterainc.com>	2013-11-11 17:02:31 -0800
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 13:05:14 -0800
commit		829a60b9055c319f3656a01eb8cb78b1b86232ef (patch)
tree		d6e709a97b9fc3274ef8de84cb52c2e3e0078807 /drivers/md/bcache/btree.c
parent		89ebb4a28ba9efb5c9b18ba552e784021957b14a (diff)
download	op-kernel-dev-829a60b9055c319f3656a01eb8cb78b1b86232ef.zip
		op-kernel-dev-829a60b9055c319f3656a01eb8cb78b1b86232ef.tar.gz
bcache: Move insert_fixup() to btree_keys_ops
Handling overlapping extents/keys is now a method that is specific to what the
btree node contains.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
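
The change described above is a dispatch-table refactor: overlap fixup on insert stops being hardcoded in btree.c and becomes a method of the key set's ops. Below is a minimal sketch of that shape only, using simplified stand-in types rather than the real bcache definitions (the actual struct btree_keys_ops and its member list live in the bcache headers touched elsewhere in this series):

/*
 * Illustrative sketch, not the bcache code: each btree_keys carries an
 * ops table, and "fix up overlapping keys on insert" is one of its
 * methods, so extent nodes and non-extent nodes can differ.
 */
#include <stdbool.h>

struct bkey;
struct btree_iter;
struct btree_keys;

struct btree_keys_ops {
	/* Returns true on an insert collision (e.g. a failed replace). */
	bool (*insert_fixup)(struct btree_keys *b, struct bkey *insert,
			     struct btree_iter *iter,
			     struct bkey *replace_key);
};

struct btree_keys {
	const struct btree_keys_ops *ops;
	/* ... sets, search-tree metadata, etc. ... */
};

/* Generic insert path: defer format-specific overlap handling to ops. */
static inline bool insert_fixup(struct btree_keys *b, struct bkey *insert,
				struct btree_iter *iter,
				struct bkey *replace_key)
{
	return b->ops->insert_fixup(b, insert, iter, replace_key);
}
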
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--	drivers/md/bcache/btree.c	246
1 file changed, 17 insertions(+), 229 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index b14f34a..463d280 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,7 +24,6 @@
 #include "btree.h"
 #include "debug.h"
 #include "extents.h"
-#include "writeback.h"

 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -90,13 +89,6 @@
  * Test module load/unload
  */

-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
@@ -1792,230 +1784,23 @@ err:

 /* Btree insertion */

-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-				    struct btree_iter *iter,
-				    struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+			     struct bkey *replace_key)
 {
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
-	uint64_t old_offset;
-	unsigned old_size, sectors_found = 0;
-
-	while (1) {
-		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k)
-			break;
-
-		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
-			if (KEY_SIZE(k))
-				break;
-			else
-				continue;
-		}
-
-		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-			continue;
-
-		old_offset = KEY_START(k);
-		old_size = KEY_SIZE(k);
-
-		/*
-		 * We might overlap with 0 size extents; we can't skip these
-		 * because if they're in the set we're inserting to we have to
-		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for replace
-		 * operations.
-		 */
-
-		if (replace_key && KEY_SIZE(k)) {
-			/*
-			 * k might have been split since we inserted/found the
-			 * key we're replacing
-			 */
-			unsigned i;
-			uint64_t offset = KEY_START(k) -
-				KEY_START(replace_key);
-
-			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(replace_key) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-				goto check_failed;
-
-			/* We didn't find a key that we were supposed to */
-			if (KEY_START(k) > KEY_START(insert) + sectors_found)
-				goto check_failed;
-
-			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-				goto check_failed;
-
-			/* skip past gen */
-			offset <<= 8;
-
-			BUG_ON(!KEY_PTRS(replace_key));
-
-			for (i = 0; i < KEY_PTRS(replace_key); i++)
-				if (k->ptr[i] != replace_key->ptr[i] + offset)
-					goto check_failed;
-
-			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-		}
-
-		if (bkey_cmp(insert, k) < 0 &&
-		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-			/*
-			 * We overlapped in the middle of an existing key: that
-			 * means we have to split the old key. But we have to do
-			 * slightly different things depending on whether the
-			 * old key has been written out yet.
-			 */
-
-			struct bkey *top;
-
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-			if (bkey_written(&b->keys, k)) {
-				/*
-				 * We insert a new key to cover the top of the
-				 * old key, and the old key is modified in place
-				 * to represent the bottom split.
-				 *
-				 * It's completely arbitrary whether the new key
-				 * is the top or the bottom, but it has to match
-				 * up with what btree_sort_fixup() does - it
-				 * doesn't check for this kind of overlap, it
-				 * depends on us inserting a new key for the top
-				 * here.
-				 */
-				top = bch_bset_search(&b->keys,
-						      bset_tree_last(&b->keys),
-						      insert);
-				bch_bset_insert(&b->keys, top, k);
-			} else {
-				BKEY_PADDED(key) temp;
-				bkey_copy(&temp.key, k);
-				bch_bset_insert(&b->keys, k, &temp.key);
-				top = bkey_next(k);
-			}
-
-			bch_cut_front(insert, top);
-			bch_cut_back(&START_KEY(insert), k);
-			bch_bset_fix_invalidated_key(&b->keys, k);
-			return false;
-		}
-
-		if (bkey_cmp(insert, k) < 0) {
-			bch_cut_front(insert, k);
-		} else {
-			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-				old_offset = KEY_START(insert);
-
-			if (bkey_written(&b->keys, k) &&
-			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-				/*
-				 * Completely overwrote, so we don't have to
-				 * invalidate the binary search tree
-				 */
-				bch_cut_front(k, k);
-			} else {
-				__bch_cut_back(&START_KEY(insert), k);
-				bch_bset_fix_invalidated_key(&b->keys, k);
-			}
-		}
-
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-	}
-
-check_failed:
-	if (replace_key) {
-		if (!sectors_found) {
-			return true;
-		} else if (sectors_found < KEY_SIZE(insert)) {
-			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-				       (KEY_SIZE(insert) - sectors_found));
-			SET_KEY_SIZE(insert, sectors_found);
-		}
-	}
-
-	return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k, struct bkey *replace_key)
-{
-	struct bset *i = btree_bset_last(b);
-	struct bkey *m, *prev;
-	unsigned status = BTREE_INSERT_STATUS_INSERT;
+	unsigned status;

 	BUG_ON(bkey_cmp(k, &b->key) > 0);
-	BUG_ON(b->level && !KEY_PTRS(k));
-	BUG_ON(!b->level && !KEY_OFFSET(k));

-	if (!b->level) {
-		struct btree_iter iter;
+	status = bch_btree_insert_key(&b->keys, k, replace_key);
+	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+		bch_check_keys(&b->keys, "%u for %s", status,
+			       replace_key ? "replace" : "insert");

-		/*
-		 * bset_search() returns the first key that is strictly greater
-		 * than the search key - but for back merging, we want to find
-		 * the previous key.
-		 */
-		prev = NULL;
-		m = bch_btree_iter_init(&b->keys, &iter,
-					PRECEDING_KEY(&START_KEY(k)));
-
-		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-			op->insert_collision = true;
-			return false;
-		}
-
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     KEY_START(k), KEY_SIZE(k));
-
-		while (m != bset_bkey_last(i) &&
-		       bkey_cmp(k, &START_KEY(m)) > 0)
-			prev = m, m = bkey_next(m);
-
-		if (key_merging_disabled(b->c))
-			goto insert;
-
-		/* prev is in the tree, if we merge we're done */
-		status = BTREE_INSERT_STATUS_BACK_MERGE;
-		if (prev &&
-		    bch_bkey_try_merge(&b->keys, prev, k))
-			goto merged;
-
-		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != bset_bkey_last(i) &&
-		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-			goto copy;
-
-		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != bset_bkey_last(i) &&
-		    bch_bkey_try_merge(&b->keys, k, m))
-			goto copy;
-	} else {
-		BUG_ON(replace_key);
-		m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
-	}
-
-insert:	bch_bset_insert(&b->keys, m, k);
-copy:	bkey_copy(m, k);
-merged:
-	bch_check_keys(&b->keys, "%u for %s", status,
-		       replace_key ? "replace" : "insert");
-
-	if (b->level && !KEY_OFFSET(k))
-		btree_current_write(b)->prio_blocked++;
-
-	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-	return true;
+		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+					      status);
+		return true;
+	} else
+		return false;
 }

 static size_t insert_u64s_remaining(struct btree *b)
@@ -2048,7 +1833,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 		if (!b->level)
 			bkey_put(b->c, k);

-		ret |= btree_insert_key(b, op, k, replace_key);
+		ret |= btree_insert_key(b, k, replace_key);
 		bch_keylist_pop_front(insert_keys);
 	} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 		BKEY_PADDED(key) temp;
@@ -2057,13 +1842,16 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);

-			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			ret |= btree_insert_key(b, &temp.key, replace_key);
 			break;
 		} else {
 			break;
 		}
 	}

+	if (!ret)
+		op->insert_collision = true;
+
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

 	BUG_ON(bch_count_data(&b->keys) < oldsize);