author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 15:50:56 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 15:50:56 -0800
commit | b91593fa8531a7396551dd9c0a0c51e9b9b97ca9 |
tree | 10b78272c77e6a2872c2b02bd12b24f484a3eead | /drivers/md/dm.c
parent | e2c5923c349c1738fe8fda980874d93f6fb2e5b6 |
parent | ef7afb3656854de04fe03b0b9b4f3722b5722d8d |
Merge tag 'for-4.15/dm' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- a few conversions from atomic_t to refcount_t (a minimal sketch of the
conversion pattern follows this list)
- a DM core fix for a race during device destruction that could result
in a BUG_ON
- a stable@ fix for a DM cache race condition that could lead to data
corruption when operating in writeback mode (writethrough is default)
- various DM cache cleanups and improvements
- add DAX support to the DM log-writes target
- a fix for the DM zoned target's ability to deal with the last zone of
the drive being smaller than all others
- a stable@ DM crypt and DM integrity fix for a negative check that was
too restrictive (it prevented slab debug with XFS on top of DM crypt
from working)
- a DM raid target fix for a panic that can occur when forcing a raid
to sync
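
The refcount_t conversions called out above follow the standard kernel pattern: the object is created holding a count of 1 (the creator's reference) instead of being set to 0 and immediately incremented, and refcount_t warns and saturates on underflow or overflow where atomic_t would silently wrap. A minimal kernel-style sketch of the pattern, using a hypothetical `foo` object rather than the actual DM code:

```c
/* Hypothetical refcounted object illustrating the atomic_t -> refcount_t
 * conversion pattern; "foo" is not the actual DM code. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t count;		/* was: atomic_t count */
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* was: atomic_set(&f->count, 0) followed by atomic_inc() */
	refcount_set(&f->count, 1);	/* creator holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	/* refcount_inc() WARNs and saturates at zero or near overflow,
	 * where atomic_inc() would silently wrap. */
	refcount_inc(&f->count);
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->count))	/* was: atomic_dec_and_test() */
		kfree(f);
}
```

This is exactly the shape of the `table_device` change in the diff below: `refcount_set(&td->count, 1)` when the entry is created, `refcount_inc()` only when an existing entry is reused.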
* tag 'for-4.15/dm' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (25 commits)
dm cache: lift common migration preparation code to alloc_migration()
dm cache: remove unused deferred_cells member from struct cache
dm cache policy smq: allocate cache blocks in order
dm cache policy smq: change max background work from 10240 to 4096 blocks
dm cache background tracker: limit amount of background work that may be issued at once
dm cache policy smq: take origin idle status into account when queuing writebacks
dm cache policy smq: handle races with queuing background_work
dm raid: fix panic when attempting to force a raid to sync
dm integrity: allow unaligned bv_offset
dm crypt: allow unaligned bv_offset
dm: small cleanup in dm_get_md()
dm: fix race between dm_get_from_kobject() and __dm_destroy()
dm: allocate struct mapped_device with kvzalloc
dm zoned: ignore last smaller runt zone
dm space map metadata: use ARRAY_SIZE
dm log writes: add support for DAX
dm log writes: add support for inline data buffers
dm cache: simplify get_per_bio_data() by removing data_size argument
dm cache: remove all obsolete writethrough-specific code
dm cache: submit writethrough writes in parallel to origin and cache
...
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 45
1 file changed, 23 insertions(+), 22 deletions(-)
```diff
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a3f8cbb..de17b71 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/pr.h>
+#include <linux/refcount.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -98,7 +99,7 @@ struct dm_md_mempools {
 
 struct table_device {
 	struct list_head list;
-	atomic_t count;
+	refcount_t count;
 	struct dm_dev dm_dev;
 };
 
@@ -685,10 +686,11 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
 
 		format_dev_t(td->dm_dev.name, dev);
 
-		atomic_set(&td->count, 0);
+		refcount_set(&td->count, 1);
 		list_add(&td->list, &md->table_devices);
+	} else {
+		refcount_inc(&td->count);
 	}
-	atomic_inc(&td->count);
 	mutex_unlock(&md->table_devices_lock);
 
 	*result = &td->dm_dev;
@@ -701,7 +703,7 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
 	struct table_device *td = container_of(d, struct table_device, dm_dev);
 
 	mutex_lock(&md->table_devices_lock);
-	if (atomic_dec_and_test(&td->count)) {
+	if (refcount_dec_and_test(&td->count)) {
 		close_table_device(td, md);
 		list_del(&td->list);
 		kfree(td);
@@ -718,7 +720,7 @@ static void free_table_devices(struct list_head *devices)
 		struct table_device *td = list_entry(tmp, struct table_device, list);
 
 		DMWARN("dm_destroy: %s still exists with %d references",
-		       td->dm_dev.name, atomic_read(&td->count));
+		       td->dm_dev.name, refcount_read(&td->count));
 		kfree(td);
 	}
 }
@@ -1684,7 +1686,7 @@ static struct mapped_device *alloc_dev(int minor)
 	struct mapped_device *md;
 	void *old_md;
 
-	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
 	if (!md) {
 		DMWARN("unable to allocate device, out of memory.");
 		return NULL;
@@ -1784,7 +1786,7 @@ bad_io_barrier:
 bad_minor:
 	module_put(THIS_MODULE);
 bad_module_get:
-	kfree(md);
+	kvfree(md);
 	return NULL;
 }
 
@@ -1803,7 +1805,7 @@ static void free_dev(struct mapped_device *md)
 
 	free_minor(minor);
 	module_put(THIS_MODULE);
-	kfree(md);
+	kvfree(md);
 }
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2061,17 +2063,12 @@ struct mapped_device *dm_get_md(dev_t dev)
 	spin_lock(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
-	if (md) {
-		if ((md == MINOR_ALLOCED ||
-		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
-		     dm_deleting_md(md) ||
-		     test_bit(DMF_FREEING, &md->flags))) {
-			md = NULL;
-			goto out;
-		}
-		dm_get(md);
+	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
+	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+		md = NULL;
+		goto out;
 	}
-
+	dm_get(md);
 out:
 	spin_unlock(&_minor_lock);
 
@@ -2698,11 +2695,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
 
 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
 
-	if (test_bit(DMF_FREEING, &md->flags) ||
-	    dm_deleting_md(md))
-		return NULL;
-
+	spin_lock(&_minor_lock);
+	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+		md = NULL;
+		goto out;
+	}
 	dm_get(md);
+out:
+	spin_unlock(&_minor_lock);
+
 	return md;
 }
```
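
The `dm_get_from_kobject()` hunk at the end is the destruction-race fix highlighted in the pull summary: the DMF_FREEING/deleting checks and the `dm_get()` now happen atomically under `_minor_lock`, the lock the destroy path holds while setting those flags, closing the window in which `__dm_destroy()` could free the device between the check and the reference grab. A hedged sketch of the general check-then-get pattern; `obj`, `OBJ_FREEING`, and `obj_lookup_lock` are hypothetical names, not the DM identifiers:

```c
#include <linux/bitops.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct obj {
	refcount_t count;
	unsigned long flags;
};

#define OBJ_FREEING 0	/* bit set by the destroy path under obj_lookup_lock */

static DEFINE_SPINLOCK(obj_lookup_lock);

/* Racy: the destroy path can set OBJ_FREEING and free the object
 * between the flag test and the reference increment. */
static struct obj *obj_get_racy(struct obj *o)
{
	if (test_bit(OBJ_FREEING, &o->flags))
		return NULL;
	refcount_inc(&o->count);
	return o;
}

/* Fixed: the check and the get form one critical section with respect
 * to the destroy path, which takes the same lock to set OBJ_FREEING. */
static struct obj *obj_get_safe(struct obj *o)
{
	spin_lock(&obj_lookup_lock);
	if (test_bit(OBJ_FREEING, &o->flags)) {
		o = NULL;
		goto out;
	}
	refcount_inc(&o->count);
out:
	spin_unlock(&obj_lookup_lock);
	return o;
}
```

The `kvzalloc_node()`/`kvfree()` hunks are the companion change from "dm: allocate struct mapped_device with kvzalloc": `struct mapped_device` is large, kvmalloc-family allocators fall back to vmalloc when physically contiguous memory is fragmented, and every free of that pointer must then become `kvfree()`. A short sketch of the pairing, with a hypothetical `big_state` struct standing in for `mapped_device`:

```c
#include <linux/mm.h>	/* kvzalloc(), kvfree() */

/* Hypothetical large object standing in for struct mapped_device. */
struct big_state {
	char payload[64 * 1024];
};

static struct big_state *big_state_alloc(void)
{
	/* kvzalloc() tries kmalloc() first and falls back to vmalloc()
	 * under fragmentation; the result must be released with
	 * kvfree(), never plain kfree(). */
	return kvzalloc(sizeof(struct big_state), GFP_KERNEL);
}

static void big_state_free(struct big_state *s)
{
	kvfree(s);
}
```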