author     Linus Torvalds <torvalds@linux-foundation.org>   2018-06-12 18:28:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-06-12 18:28:00 -0700
commit     b08fc5277aaa1d8ea15470d38bf36f19dfb0e125 (patch)
tree       1910dc474cb1ede95581dd9faa81a3bebeded0dc /mm
parent     4597fcff07044d89c646d0c5d8b42cd976d966a1 (diff)
parent     9d2a789c1db75d0f55b14fa57bec548d94332ad8 (diff)
Merge tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull more overflow updates from Kees Cook:
 "The rest of the overflow changes for v4.18-rc1. This includes the
  explicit overflow fixes from Silvio, further struct_size() conversions
  from Matthew, and a bug fix from Dan.

  But the bulk of it is the treewide conversions to use either the
  2-factor argument allocators (e.g. kmalloc(a * b, ...) into
  kmalloc_array(a, b, ...) or the array_size() macros (e.g. vmalloc(a * b)
  into vmalloc(array_size(a, b)).

  Coccinelle was fighting me on several fronts, so I've done a bunch of
  manual whitespace updates in the patches as well.

  Summary:

   - Error path bug fix for overflow tests (Dan)

   - Additional struct_size() conversions (Matthew, Kees)

   - Explicitly reported overflow fixes (Silvio, Kees)

   - Add missing kvcalloc() function (Kees)

   - Treewide conversions of allocators to use either 2-factor argument
     variant when available, or array_size() and array3_size() as needed
     (Kees)"

* tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (26 commits)
  treewide: Use array_size in f2fs_kvzalloc()
  treewide: Use array_size() in f2fs_kzalloc()
  treewide: Use array_size() in f2fs_kmalloc()
  treewide: Use array_size() in sock_kmalloc()
  treewide: Use array_size() in kvzalloc_node()
  treewide: Use array_size() in vzalloc_node()
  treewide: Use array_size() in vzalloc()
  treewide: Use array_size() in vmalloc()
  treewide: devm_kzalloc() -> devm_kcalloc()
  treewide: devm_kmalloc() -> devm_kmalloc_array()
  treewide: kvzalloc() -> kvcalloc()
  treewide: kvmalloc() -> kvmalloc_array()
  treewide: kzalloc_node() -> kcalloc_node()
  treewide: kzalloc() -> kcalloc()
  treewide: kmalloc() -> kmalloc_array()
  mm: Introduce kvcalloc()
  video: uvesafb: Fix integer overflow in allocation
  UBIFS: Fix potential integer overflow in allocation
  leds: Use struct_size() in allocation
  Convert intel uncore to struct_size
  ...
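The hazard these conversions close is easy to miss: with an attacker-influenced count, kmalloc(count * size, ...) can wrap around size_t and return a buffer far smaller than requested, which the caller then overruns. The 2-factor allocators and array_size() instead detect the overflow and saturate the request to SIZE_MAX, which no allocator can satisfy, so the call fails cleanly with NULL. Below is a minimal standalone sketch of that check; the kernel's array_size() is built on check_mul_overflow() from include/linux/overflow.h, and this userspace analogue uses the same compiler builtin, so it is illustrative only, not the kernel implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace analogue of the kernel's array_size(): multiply two
     * sizes, saturating to SIZE_MAX on overflow so that a following
     * malloc()/kmalloc() fails instead of returning a short buffer. */
    static size_t array_size_demo(size_t n, size_t size)
    {
            size_t bytes;

            if (__builtin_mul_overflow(n, size, &bytes))
                    return SIZE_MAX;  /* no allocator can satisfy this */
            return bytes;
    }

    int main(void)
    {
            size_t nr = SIZE_MAX / 4 + 1;  /* oversized, e.g. user-supplied */

            /* Naive multiply wraps silently (to 0 on 64-bit here). */
            printf("naive  : %zu bytes\n", nr * sizeof(uint64_t));
            /* Checked multiply saturates, so the allocation would fail. */
            printf("checked: %zu bytes\n", array_size_demo(nr, sizeof(uint64_t)));
            return 0;
    }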
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup_benchmark.c   2
-rw-r--r--  mm/huge_memory.c     4
-rw-r--r--  mm/hugetlb.c         3
-rw-r--r--  mm/percpu-stats.c    2
-rw-r--r--  mm/slab.c            3
-rw-r--r--  mm/slub.c           19
-rw-r--r--  mm/swap_slots.c      4
-rw-r--r--  mm/swap_state.c      2
-rw-r--r--  mm/swapfile.c        5
9 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 0f44759..6a47370 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
struct page **pages;
nr_pages = gup->size / PAGE_SIZE;
- pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+ pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
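For reference, the helper this hunk relies on was added in the same series ("mm: Introduce kvcalloc()" in the shortlog above); it is a thin zeroing wrapper over the overflow-checked kvmalloc_array(), along these lines:

    static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
    {
            return kvmalloc_array(n, size, flags | __GFP_ZERO);
    }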
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ba8fdc0..1cd7c1a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1131,8 +1131,8 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
- pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
- GFP_KERNEL);
+ pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
+ GFP_KERNEL);
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
goto out;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 696beff..3612fbb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2798,7 +2798,8 @@ static int __init hugetlb_init(void)
num_fault_mutexes = 1;
#endif
hugetlb_fault_mutex_table =
- kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
+ kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
+ GFP_KERNEL);
BUG_ON(!hugetlb_fault_mutex_table);
for (i = 0; i < num_fault_mutexes; i++)
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index 063ff60..b5fdd43 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -144,7 +144,7 @@ alloc_buffer:
spin_unlock_irq(&pcpu_lock);
/* there can be at most this many free and allocated fragments */
- buffer = vmalloc((2 * max_nr_alloc + 1) * sizeof(int));
+ buffer = vmalloc(array_size(sizeof(int), (2 * max_nr_alloc + 1)));
if (!buffer)
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
index 36688f6..aa76a70 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4338,7 +4338,8 @@ static int leaks_show(struct seq_file *m, void *p)
if (x[0] == x[1]) {
/* Increase the buffer size */
mutex_unlock(&slab_mutex);
- m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+ m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
+ GFP_KERNEL);
if (!m->private) {
/* Too bad, we are really out */
m->private = x;
diff --git a/mm/slub.c b/mm/slub.c
index 1550547..a3b8467 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3623,8 +3623,9 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
- sizeof(long), GFP_ATOMIC);
+ unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
+ sizeof(long),
+ GFP_ATOMIC);
if (!map)
return;
slab_err(s, page, text, s->name);
@@ -4412,8 +4413,9 @@ static long validate_slab_cache(struct kmem_cache *s)
{
int node;
unsigned long count = 0;
- unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
- sizeof(unsigned long), GFP_KERNEL);
+ unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+ sizeof(unsigned long),
+ GFP_KERNEL);
struct kmem_cache_node *n;
if (!map)
@@ -4573,8 +4575,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
- unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
- sizeof(unsigned long), GFP_KERNEL);
+ unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+ sizeof(unsigned long),
+ GFP_KERNEL);
struct kmem_cache_node *n;
if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
@@ -4750,7 +4753,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
int x;
unsigned long *nodes;
- nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+ nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
@@ -5293,7 +5296,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
unsigned long sum = 0;
int cpu;
int len;
- int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
+ int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index f51ac05..a791411 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -122,12 +122,12 @@ static int alloc_swap_slot_cache(unsigned int cpu)
* as kvzalloc could trigger reclaim and get_swap_page,
* which can lock swap_slots_cache_mutex.
*/
- slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
GFP_KERNEL);
if (!slots)
return -ENOMEM;
- slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
GFP_KERNEL);
if (!slots_ret) {
kvfree(slots);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ab8e59c..ecee9c6 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -620,7 +620,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
unsigned int i, nr;
nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
- spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
+ spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
if (!spaces)
return -ENOMEM;
for (i = 0; i < nr; i++) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 78a015f..925cf79 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3196,7 +3196,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
+ cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
GFP_KERNEL);
if (!cluster_info) {
error = -ENOMEM;
@@ -3233,7 +3233,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
/* frontswap enabled? set up bit-per-page map for frontswap */
if (IS_ENABLED(CONFIG_FRONTSWAP))
- frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
+ frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
+ sizeof(long),
GFP_KERNEL);
if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {