Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile       |    4
-rw-r--r--  mm/allocpercpu.c  |   32
-rw-r--r--  mm/bootmem.c      |   35
-rw-r--r--  mm/filemap.c      |    7
-rw-r--r--  mm/memory.c       |   10
-rw-r--r--  mm/percpu.c       | 1226
-rw-r--r--  mm/slab.c         |   71
-rw-r--r--  mm/slob.c         |   37
-rw-r--r--  mm/slub.c         |   97
-rw-r--r--  mm/vmalloc.c      |   97
10 files changed, 1562 insertions(+), 54 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index 72255be..818569b6 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
+ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+obj-$(CONFIG_SMP) += percpu.o
+else
obj-$(CONFIG_SMP) += allocpercpu.o
+endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 4297bc4..3653c57 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
/**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
* @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
*
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area. Percpu objects are populated with
+ * zeroed buffers.
*/
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
{
/*
* We allocate whole cache lines to avoid false sharing
*/
size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, gfp);
+ void *pdata = kzalloc(sz, GFP_KERNEL);
void *__pdata = __percpu_disguise(pdata);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > __alignof__(unsigned long long));
+
if (unlikely(!pdata))
return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+ if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+ &cpu_possible_map)))
return __pdata;
kfree(pdata);
return NULL;
}
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
* @__pdata: object to clean up
*
* We simply clean up any per-cpu object left. No need for the client to
* track and specify through a bit mask which per-cpu objects are to be freed.
*/
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
{
if (unlikely(!__pdata))
return;
__percpu_depopulate_mask(__pdata, &cpu_possible_map);
kfree(__percpu_disguise(__pdata));
}
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
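With this patch both backends export the same __alloc_percpu()/free_percpu() pair, so callers are written once for either allocator. A minimal usage sketch, assuming a hypothetical my_counter type (alloc_percpu(type) is the type-safe wrapper around __alloc_percpu()):

	struct my_counter {
		unsigned long events;		/* hypothetical payload */
	};

	static struct my_counter *counters;

	static int __init my_init(void)
	{
		int cpu;

		counters = alloc_percpu(struct my_counter);
		if (!counters)
			return -ENOMEM;
		for_each_possible_cpu(cpu)
			per_cpu_ptr(counters, cpu)->events = 0;
		return 0;
	}

	static void my_exit(void)
	{
		free_percpu(counters);
	}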
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 51a0ccf..daf9271 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -382,7 +382,6 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
* reserve_bootmem - mark a page range as usable
* @addr: starting address of the range
@@ -403,7 +402,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
unsigned long step)
@@ -429,8 +427,8 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
}
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
{
unsigned long fallback = 0;
unsigned long min, max, start, sidx, midx, step;
@@ -530,17 +528,34 @@ find_block:
return NULL;
}
+static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+#ifdef CONFIG_HAVE_ARCH_BOOTMEM
+ bootmem_data_t *p_bdata;
+
+ p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
+ if (p_bdata)
+ return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+#endif
+ return NULL;
+}
+
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
bootmem_data_t *bdata;
+ void *region;
restart:
- list_for_each_entry(bdata, &bdata_list, list) {
- void *region;
+ region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
+ if (region)
+ return region;
+ list_for_each_entry(bdata, &bdata_list, list) {
if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
continue;
if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
@@ -618,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
{
void *ptr;
+ ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+ if (ptr)
+ return ptr;
+
ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
if (ptr)
return ptr;
@@ -674,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
{
void *ptr;
+ ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
+ if (ptr)
+ return ptr;
+
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
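alloc_arch_preferred_bootmem() gives the architecture first pick before the generic free-block search runs. A minimal sketch of the hook an arch might provide under CONFIG_HAVE_ARCH_BOOTMEM (the trivial body is an assumption for illustration, not part of this patch):

	bootmem_data_t * __init
	bootmem_arch_preferred_node(bootmem_data_t *bdata, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
	{
		/* NULL means "no preference"; the caller then falls back
		 * to alloc_bootmem_core() on the regular node list. */
		return NULL;
	}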
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe..126d397 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
@@ -1851,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_inatomic_nocache(kaddr + offset,
- buf, bytes);
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+ left = __copy_from_user(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
diff --git a/mm/memory.c b/mm/memory.c
index baa999e..05fab3b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -48,6 +48,8 @@
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
+#include <linux/kprobes.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
@@ -99,6 +101,14 @@ int randomize_va_space __read_mostly =
2;
#endif
+/*
+ * Mutex protecting text section modification (dynamic code patching).
+ * Some users need to sleep (allocating memory...) while they hold this lock.
+ *
+ * NOT exported to modules - patching kernel text is a really delicate matter.
+ */
+DEFINE_MUTEX(text_mutex);
+
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
diff --git a/mm/percpu.c b/mm/percpu.c
new file mode 100644
index 0000000..bfe6a3a
--- /dev/null
+++ b/mm/percpu.c
@@ -0,0 +1,1226 @@
+/*
+ * linux/mm/percpu.c - percpu memory allocator
+ *
+ * Copyright (C) 2009 SUSE Linux Products GmbH
+ * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * This is the percpu allocator which can handle both static and
+ * dynamic areas. Percpu areas are allocated in chunks in the vmalloc
+ * area. Each chunk consists of num_possible_cpus() units and the
+ * first chunk is used for static percpu variables in the kernel image
+ * (special boot time alloc/init handling is necessary as these areas
+ * need to be brought up before allocation services are running).
+ * Units grow as necessary and all units grow or shrink in unison.
+ * When a chunk is filled up, another chunk is allocated, i.e. in the
+ * vmalloc area:
+ *
+ * c0 c1 c2
+ * ------------------- ------------------- ------------
+ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
+ * ------------------- ...... ------------------- .... ------------
+ *
+ * Allocation is done in offset-size areas of a single unit's space.
+ * I.e., an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
+ * c1:u0, c1:u1, c1:u2 and c1:u3. Percpu access can be done by
+ * configuring percpu base registers UNIT_SIZE apart.
+ *
+ * There are usually many small percpu allocations, many of them as
+ * small as 4 bytes. The allocator organizes chunks into lists
+ * according to free size and tries to allocate from the fullest one.
+ * Each chunk keeps a maximum contiguous area size hint which is
+ * guaranteed to be equal to or larger than the maximum contiguous
+ * area in the chunk. This helps the allocator avoid iterating over
+ * the chunk maps unnecessarily.
+ *
+ * Allocation state in each chunk is kept using an array of integers
+ * on chunk->map. A positive value in the map represents a free
+ * region and a negative value an allocated one. Allocation inside a
+ * chunk is done by scanning this map sequentially and serving the
+ * first matching entry. This is mostly copied from the
+ * percpu_modalloc() allocator.
+ * Chunks are also linked into a rb tree to ease address to chunk
+ * mapping during free.
+ *
+ * To use this allocator, arch code should do the following.
+ *
+ * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ *
+ * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
+ * regular address to percpu pointer and back
+ *
+ * - use pcpu_setup_first_chunk() during percpu area initialization to
+ * setup the first chunk containing the kernel static percpu area
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bootmem.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/pfn.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
+#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
+
+struct pcpu_chunk {
+ struct list_head list; /* linked to pcpu_slot lists */
+ struct rb_node rb_node; /* key is chunk->vm->addr */
+ int free_size; /* free bytes in the chunk */
+ int contig_hint; /* max contiguous size hint */
+ struct vm_struct *vm; /* mapped vmalloc region */
+ int map_used; /* # of map entries used */
+ int map_alloc; /* # of map entries allocated */
+ int *map; /* allocation map */
+ bool immutable; /* no [de]population allowed */
+ struct page **page; /* points to page array */
+ struct page *page_ar[]; /* #cpus * UNIT_PAGES */
+};
+
+static int pcpu_unit_pages __read_mostly;
+static int pcpu_unit_size __read_mostly;
+static int pcpu_chunk_size __read_mostly;
+static int pcpu_nr_slots __read_mostly;
+static size_t pcpu_chunk_struct_size __read_mostly;
+
+/* the address of the first chunk which starts with the kernel static area */
+void *pcpu_base_addr __read_mostly;
+EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+/* optional reserved chunk, only accessible for reserved allocations */
+static struct pcpu_chunk *pcpu_reserved_chunk;
+/* offset limit of the reserved chunk */
+static int pcpu_reserved_chunk_limit;
+
+/*
+ * Synchronization rules.
+ *
+ * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
+ * protects allocation/reclaim paths, chunks and chunk->page arrays.
+ * The latter is a spinlock and protects the index data structures -
+ * chunk slots, rbtree, chunks and area maps in chunks.
+ *
+ * During allocation, pcpu_alloc_mutex is kept locked all the time and
+ * pcpu_lock is grabbed and released as necessary. All actual memory
+ * allocations are done using GFP_KERNEL with pcpu_lock released.
+ *
+ * Free path accesses and alters only the index data structures, so it
+ * can be safely called from atomic context. When memory needs to be
+ * returned to the system, free path schedules reclaim_work which
+ * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
+ * reclaimed, releases both locks and frees the chunks. Note that it's
+ * necessary to grab both locks to remove a chunk from circulation as
+ * allocation path might be referencing the chunk with only
+ * pcpu_alloc_mutex locked.
+ */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
+static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
+
+static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
+
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
+static int __pcpu_size_to_slot(int size)
+{
+ int highbit = fls(size); /* size is in bytes */
+ return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
+}
+
+static int pcpu_size_to_slot(int size)
+{
+ if (size == pcpu_unit_size)
+ return pcpu_nr_slots - 1;
+ return __pcpu_size_to_slot(size);
+}
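A quick worked example of the slot math above, with PCPU_SLOT_BASE_SHIFT == 5: a 12-byte free area (fls(12) == 4) maps to slot max(4 - 5 + 2, 1) == 1, a 100-byte area (fls == 7) to slot 4, and a 1024-byte area (fls == 11) to slot 8, while a chunk whose free_size equals pcpu_unit_size is parked in the dedicated last slot for fully free chunks.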
+
+static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
+{
+ if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
+ return 0;
+
+ return pcpu_size_to_slot(chunk->free_size);
+}
+
+static int pcpu_page_idx(unsigned int cpu, int page_idx)
+{
+ return cpu * pcpu_unit_pages + page_idx;
+}
+
+static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
+{
+ return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+}
+
+static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
+{
+ return (unsigned long)chunk->vm->addr +
+ (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+}
+
+static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
+ int page_idx)
+{
+ return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+}
+
+/**
+ * pcpu_mem_alloc - allocate memory
+ * @size: bytes to allocate
+ *
+ * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
+ * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * memory is always zeroed.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_mem_alloc(size_t size)
+{
+ if (size <= PAGE_SIZE)
+ return kzalloc(size, GFP_KERNEL);
+ else {
+ void *ptr = vmalloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+ }
+}
+
+/**
+ * pcpu_mem_free - free memory
+ * @ptr: memory to free
+ * @size: size of the area
+ *
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ */
+static void pcpu_mem_free(void *ptr, size_t size)
+{
+ if (size <= PAGE_SIZE)
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+/**
+ * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
+ * @chunk: chunk of interest
+ * @oslot: the previous slot it was on
+ *
+ * This function is called after an allocation or free changed @chunk.
+ * New slot according to the changed state is determined and @chunk is
+ * moved to the slot. Note that the reserved chunk is never put on
+ * chunk slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
+{
+ int nslot = pcpu_chunk_slot(chunk);
+
+ if (chunk != pcpu_reserved_chunk && oslot != nslot) {
+ if (oslot < nslot)
+ list_move(&chunk->list, &pcpu_slot[nslot]);
+ else
+ list_move_tail(&chunk->list, &pcpu_slot[nslot]);
+ }
+}
+
+static struct rb_node **pcpu_chunk_rb_search(void *addr,
+ struct rb_node **parentp)
+{
+ struct rb_node **p = &pcpu_addr_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct pcpu_chunk *chunk;
+
+ while (*p) {
+ parent = *p;
+ chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
+
+ if (addr < chunk->vm->addr)
+ p = &(*p)->rb_left;
+ else if (addr > chunk->vm->addr)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+
+ if (parentp)
+ *parentp = parent;
+ return p;
+}
+
+/**
+ * pcpu_chunk_addr_search - search for chunk containing specified address
+ * @addr: address to search for
+ *
+ * Look for the chunk which might contain @addr. More specifically, it
+ * searches for the chunk with the highest start address which isn't
+ * beyond @addr.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * The address of the found chunk.
+ */
+static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
+{
+ struct rb_node *n, *parent;
+ struct pcpu_chunk *chunk;
+
+ /* is it in the reserved chunk? */
+ if (pcpu_reserved_chunk) {
+ void *start = pcpu_reserved_chunk->vm->addr;
+
+ if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
+ return pcpu_reserved_chunk;
+ }
+
+ /* nah... search the regular ones */
+ n = *pcpu_chunk_rb_search(addr, &parent);
+ if (!n) {
+ /* no exactly matching chunk, the parent is the closest */
+ n = parent;
+ BUG_ON(!n);
+ }
+ chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+
+ if (addr < chunk->vm->addr) {
+ /* the parent was the next one, look for the previous one */
+ n = rb_prev(n);
+ BUG_ON(!n);
+ chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+ }
+
+ return chunk;
+}
+
+/**
+ * pcpu_chunk_addr_insert - insert chunk into address rb tree
+ * @new: chunk to insert
+ *
+ * Insert @new into address rb tree.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
+{
+ struct rb_node **p, *parent;
+
+ p = pcpu_chunk_rb_search(new->vm->addr, &parent);
+ BUG_ON(*p);
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, &pcpu_addr_root);
+}
+
+/**
+ * pcpu_extend_area_map - extend area map for allocation
+ * @chunk: target chunk
+ *
+ * Extend the area map of @chunk so that it can accommodate an allocation.
+ * A single allocation can split an area into three areas, so this
+ * function makes sure that @chunk->map has at least two extra slots.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
+ * if area map is extended.
+ *
+ * RETURNS:
+ * 0 if noop, 1 if successfully extended, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+{
+ int new_alloc;
+ int *new;
+ size_t size;
+
+ /* has enough? */
+ if (chunk->map_alloc >= chunk->map_used + 2)
+ return 0;
+
+ spin_unlock_irq(&pcpu_lock);
+
+ new_alloc = PCPU_DFL_MAP_ALLOC;
+ while (new_alloc < chunk->map_used + 2)
+ new_alloc *= 2;
+
+ new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
+ if (!new) {
+ spin_lock_irq(&pcpu_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * Acquire pcpu_lock and switch to new area map. Only free
+ * could have happened in between, so map_used couldn't have
+ * grown.
+ */
+ spin_lock_irq(&pcpu_lock);
+ BUG_ON(new_alloc < chunk->map_used + 2);
+
+ size = chunk->map_alloc * sizeof(chunk->map[0]);
+ memcpy(new, chunk->map, size);
+
+ /*
+ * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
+ * one of the first chunks and still using static map.
+ */
+ if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
+ pcpu_mem_free(chunk->map, size);
+
+ chunk->map_alloc = new_alloc;
+ chunk->map = new;
+ return 0;
+}
+
+/**
+ * pcpu_split_block - split a map block
+ * @chunk: chunk of interest
+ * @i: index of map block to split
+ * @head: head size in bytes (can be 0)
+ * @tail: tail size in bytes (can be 0)
+ *
+ * Split the @i'th map block into two or three blocks. If @head is
+ * non-zero, a @head bytes block is inserted before block @i, moving it
+ * to @i+1 and reducing its size by @head bytes.
+ *
+ * If @tail is non-zero, the target block, which can be @i or @i+1
+ * depending on @head, is reduced by @tail bytes and a @tail bytes block
+ * is inserted after the target block.
+ *
+ * @chunk->map must have enough free slots to accommodate the split.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
+ int head, int tail)
+{
+ int nr_extra = !!head + !!tail;
+
+ BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
+
+ /* insert new subblocks */
+ memmove(&chunk->map[i + nr_extra], &chunk->map[i],
+ sizeof(chunk->map[0]) * (chunk->map_used - i));
+ chunk->map_used += nr_extra;
+
+ if (head) {
+ chunk->map[i + 1] = chunk->map[i] - head;
+ chunk->map[i++] = head;
+ }
+ if (tail) {
+ chunk->map[i++] -= tail;
+ chunk->map[i] = tail;
+ }
+}
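For instance, splitting a 1024-byte free block at index @i with @head == 8 and @tail == 16 first inserts an 8-byte block at @i (leaving 1016 bytes at @i+1), then trims that block by 16 bytes and inserts the tail after it, ending with blocks of 8, 1000 and 16 bytes and map_used grown by two.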
+
+/**
+ * pcpu_alloc_area - allocate area from a pcpu_chunk
+ * @chunk: chunk of interest
+ * @size: wanted size in bytes
+ * @align: wanted align
+ *
+ * Try to allocate @size bytes area aligned at @align from @chunk.
+ * Note that this function only allocates the offset. It doesn't
+ * populate or map the area.
+ *
+ * @chunk->map must have at least two free slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * Allocated offset in @chunk on success, -1 if no matching area is
+ * found.
+ */
+static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
+{
+ int oslot = pcpu_chunk_slot(chunk);
+ int max_contig = 0;
+ int i, off;
+
+ for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
+ bool is_last = i + 1 == chunk->map_used;
+ int head, tail;
+
+ /* extra for alignment requirement */
+ head = ALIGN(off, align) - off;
+ BUG_ON(i == 0 && head != 0);
+
+ if (chunk->map[i] < 0)
+ continue;
+ if (chunk->map[i] < head + size) {
+ max_contig = max(chunk->map[i], max_contig);
+ continue;
+ }
+
+ /*
+ * If head is small or the previous block is free,
+ * merge'em. Note that 'small' is defined as smaller
+ * than sizeof(int), which is very small but isn't too
+ * uncommon for percpu allocations.
+ */
+ if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
+ if (chunk->map[i - 1] > 0)
+ chunk->map[i - 1] += head;
+ else {
+ chunk->map[i - 1] -= head;
+ chunk->free_size -= head;
+ }
+ chunk->map[i] -= head;
+ off += head;
+ head = 0;
+ }
+
+ /* if tail is small, just keep it around */
+ tail = chunk->map[i] - head - size;
+ if (tail < sizeof(int))
+ tail = 0;
+
+ /* split if warranted */
+ if (head || tail) {
+ pcpu_split_block(chunk, i, head, tail);
+ if (head) {
+ i++;
+ off += head;
+ max_contig = max(chunk->map[i - 1], max_contig);
+ }
+ if (tail)
+ max_contig = max(chunk->map[i + 1], max_contig);
+ }
+
+ /* update hint and mark allocated */
+ if (is_last)
+ chunk->contig_hint = max_contig; /* fully scanned */
+ else
+ chunk->contig_hint = max(chunk->contig_hint,
+ max_contig);
+
+ chunk->free_size -= chunk->map[i];
+ chunk->map[i] = -chunk->map[i];
+
+ pcpu_chunk_relocate(chunk, oslot);
+ return off;
+ }
+
+ chunk->contig_hint = max_contig; /* fully scanned */
+ pcpu_chunk_relocate(chunk, oslot);
+
+ /* tell the upper layer that this chunk has no matching area */
+ return -1;
+}
+
+/**
+ * pcpu_free_area - free area to a pcpu_chunk
+ * @chunk: chunk of interest
+ * @freeme: offset of area to free
+ *
+ * Free the area starting at offset @freeme back to @chunk. Note that
+ * this function only modifies the allocation map. It doesn't
+ * depopulate or unmap the area.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
+{
+ int oslot = pcpu_chunk_slot(chunk);
+ int i, off;
+
+ for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
+ if (off == freeme)
+ break;
+ BUG_ON(off != freeme);
+ BUG_ON(chunk->map[i] > 0);
+
+ chunk->map[i] = -chunk->map[i];
+ chunk->free_size += chunk->map[i];
+
+ /* merge with previous? */
+ if (i > 0 && chunk->map[i - 1] >= 0) {
+ chunk->map[i - 1] += chunk->map[i];
+ chunk->map_used--;
+ memmove(&chunk->map[i], &chunk->map[i + 1],
+ (chunk->map_used - i) * sizeof(chunk->map[0]));
+ i--;
+ }
+ /* merge with next? */
+ if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
+ chunk->map[i] += chunk->map[i + 1];
+ chunk->map_used--;
+ memmove(&chunk->map[i + 1], &chunk->map[i + 2],
+ (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
+ }
+
+ chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
+ pcpu_chunk_relocate(chunk, oslot);
+}
+
+/**
+ * pcpu_unmap - unmap pages out of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to unmap
+ * @page_end: page index of the last page to unmap + 1
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
+ * If @flush is true, vcache is flushed before unmapping and tlb
+ * after.
+ */
+static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
+ bool flush)
+{
+ unsigned int last = num_possible_cpus() - 1;
+ unsigned int cpu;
+
+ /* unmap must not be done on immutable chunk */
+ WARN_ON(chunk->immutable);
+
+ /*
+ * Each flushing trial can be very expensive, so issue the flush
+ * on the whole region at once rather than doing it for each cpu.
+ * This could be overkill but is more scalable.
+ */
+ if (flush)
+ flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+
+ for_each_possible_cpu(cpu)
+ unmap_kernel_range_noflush(
+ pcpu_chunk_addr(chunk, cpu, page_start),
+ (page_end - page_start) << PAGE_SHIFT);
+
+ /* ditto as flush_cache_vunmap() */
+ if (flush)
+ flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+}
+
+/**
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
+ * @chunk: chunk to depopulate
+ * @off: offset to the area to depopulate
+ * @size: size of the area to depopulate in bytes
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
+ * from @chunk. If @flush is true, vcache is flushed before unmapping
+ * and tlb after.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex.
+ */
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
+ bool flush)
+{
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ int unmap_start = -1;
+ int uninitialized_var(unmap_end);
+ unsigned int cpu;
+ int i;
+
+ for (i = page_start; i < page_end; i++) {
+ for_each_possible_cpu(cpu) {
+ struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+ if (!*pagep)
+ continue;
+
+ __free_page(*pagep);
+
+ /*
+ * If it's partial depopulation, it might get
+ * populated or depopulated again. Mark the
+ * page gone.
+ */
+ *pagep = NULL;
+
+ unmap_start = unmap_start < 0 ? i : unmap_start;
+ unmap_end = i + 1;
+ }
+ }
+
+ if (unmap_start >= 0)
+ pcpu_unmap(chunk, unmap_start, unmap_end, flush);
+}
+
+/**
+ * pcpu_map - map pages into a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to map
+ * @page_end: page index of the last page to map + 1
+ *
+ * For each cpu, map pages [@page_start,@page_end) into @chunk.
+ * vcache is flushed afterwards.
+ */
+static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+{
+ unsigned int last = num_possible_cpus() - 1;
+ unsigned int cpu;
+ int err;
+
+ /* map must not be done on immutable chunk */
+ WARN_ON(chunk->immutable);
+
+ for_each_possible_cpu(cpu) {
+ err = map_kernel_range_noflush(
+ pcpu_chunk_addr(chunk, cpu, page_start),
+ (page_end - page_start) << PAGE_SHIFT,
+ PAGE_KERNEL,
+ pcpu_chunk_pagep(chunk, cpu, page_start));
+ if (err < 0)
+ return err;
+ }
+
+ /* flush at once, please read comments in pcpu_unmap() */
+ flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+ return 0;
+}
+
+/**
+ * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @off: offset to the area to populate
+ * @size: size of the area to populate in bytes
+ *
+ * For each cpu, populate and map pages [@page_start,@page_end) into
+ * @chunk. The area is cleared on return.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, does GFP_KERNEL allocation.
+ */
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+{
+ const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ int map_start = -1;
+ int uninitialized_var(map_end);
+ unsigned int cpu;
+ int i;
+
+ for (i = page_start; i < page_end; i++) {
+ if (pcpu_chunk_page_occupied(chunk, i)) {
+ if (map_start >= 0) {
+ if (pcpu_map(chunk, map_start, map_end))
+ goto err;
+ map_start = -1;
+ }
+ continue;
+ }
+
+ map_start = map_start < 0 ? i : map_start;
+ map_end = i + 1;
+
+ for_each_possible_cpu(cpu) {
+ struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+ *pagep = alloc_pages_node(cpu_to_node(cpu),
+ alloc_mask, 0);
+ if (!*pagep)
+ goto err;
+ }
+ }
+
+ if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
+ goto err;
+
+ for_each_possible_cpu(cpu)
+ memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
+ size);
+
+ return 0;
+err:
+ /* likely under heavy memory pressure, give memory back */
+ pcpu_depopulate_chunk(chunk, off, size, true);
+ return -ENOMEM;
+}
+
+static void free_pcpu_chunk(struct pcpu_chunk *chunk)
+{
+ if (!chunk)
+ return;
+ if (chunk->vm)
+ free_vm_area(chunk->vm);
+ pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
+ kfree(chunk);
+}
+
+static struct pcpu_chunk *alloc_pcpu_chunk(void)
+{
+ struct pcpu_chunk *chunk;
+
+ chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+
+ chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+ chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
+ chunk->map[chunk->map_used++] = pcpu_unit_size;
+ chunk->page = chunk->page_ar;
+
+ chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
+ if (!chunk->vm) {
+ free_pcpu_chunk(chunk);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&chunk->list);
+ chunk->free_size = pcpu_unit_size;
+ chunk->contig_hint = pcpu_unit_size;
+
+ return chunk;
+}
+
+/**
+ * pcpu_alloc - the percpu allocator
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ * @reserved: allocate from the reserved chunk if available
+ *
+ * Allocate percpu area of @size bytes aligned at @align.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+{
+ struct pcpu_chunk *chunk;
+ int slot, off;
+
+ if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+ WARN(true, "illegal size (%zu) or align (%zu) for "
+ "percpu allocation\n", size, align);
+ return NULL;
+ }
+
+ mutex_lock(&pcpu_alloc_mutex);
+ spin_lock_irq(&pcpu_lock);
+
+ /* serve reserved allocations from the reserved chunk if available */
+ if (reserved && pcpu_reserved_chunk) {
+ chunk = pcpu_reserved_chunk;
+ if (size > chunk->contig_hint ||
+ pcpu_extend_area_map(chunk) < 0)
+ goto fail_unlock;
+ off = pcpu_alloc_area(chunk, size, align);
+ if (off >= 0)
+ goto area_found;
+ goto fail_unlock;
+ }
+
+restart:
+ /* search through normal chunks */
+ for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
+ list_for_each_entry(chunk, &pcpu_slot[slot], list) {
+ if (size > chunk->contig_hint)
+ continue;
+
+ switch (pcpu_extend_area_map(chunk)) {
+ case 0:
+ break;
+ case 1:
+ goto restart; /* pcpu_lock dropped, restart */
+ default:
+ goto fail_unlock;
+ }
+
+ off = pcpu_alloc_area(chunk, size, align);
+ if (off >= 0)
+ goto area_found;
+ }
+ }
+
+ /* hmmm... no space left, create a new chunk */
+ spin_unlock_irq(&pcpu_lock);
+
+ chunk = alloc_pcpu_chunk();
+ if (!chunk)
+ goto fail_unlock_mutex;
+
+ spin_lock_irq(&pcpu_lock);
+ pcpu_chunk_relocate(chunk, -1);
+ pcpu_chunk_addr_insert(chunk);
+ goto restart;
+
+area_found:
+ spin_unlock_irq(&pcpu_lock);
+
+ /* populate, map and clear the area */
+ if (pcpu_populate_chunk(chunk, off, size)) {
+ spin_lock_irq(&pcpu_lock);
+ pcpu_free_area(chunk, off);
+ goto fail_unlock;
+ }
+
+ mutex_unlock(&pcpu_alloc_mutex);
+
+ return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+
+fail_unlock:
+ spin_unlock_irq(&pcpu_lock);
+fail_unlock_mutex:
+ mutex_unlock(&pcpu_alloc_mutex);
+ return NULL;
+}
+
+/**
+ * __alloc_percpu - allocate dynamic percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align. Might
+ * sleep. Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_percpu(size_t size, size_t align)
+{
+ return pcpu_alloc(size, align, false);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu);
+
+/**
+ * __alloc_reserved_percpu - allocate reserved percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align from reserved
+ * percpu area if arch has set it up; otherwise, allocation is served
+ * from the same dynamic area. Might sleep. Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_reserved_percpu(size_t size, size_t align)
+{
+ return pcpu_alloc(size, align, true);
+}
+
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ *
+ * CONTEXT:
+ * workqueue context.
+ */
+static void pcpu_reclaim(struct work_struct *work)
+{
+ LIST_HEAD(todo);
+ struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+ struct pcpu_chunk *chunk, *next;
+
+ mutex_lock(&pcpu_alloc_mutex);
+ spin_lock_irq(&pcpu_lock);
+
+ list_for_each_entry_safe(chunk, next, head, list) {
+ WARN_ON(chunk->immutable);
+
+ /* spare the first one */
+ if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+ continue;
+
+ rb_erase(&chunk->rb_node, &pcpu_addr_root);
+ list_move(&chunk->list, &todo);
+ }
+
+ spin_unlock_irq(&pcpu_lock);
+ mutex_unlock(&pcpu_alloc_mutex);
+
+ list_for_each_entry_safe(chunk, next, &todo, list) {
+ pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+ free_pcpu_chunk(chunk);
+ }
+}
+
+/**
+ * free_percpu - free percpu area
+ * @ptr: pointer to area to free
+ *
+ * Free percpu area @ptr.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+void free_percpu(void *ptr)
+{
+ void *addr = __pcpu_ptr_to_addr(ptr);
+ struct pcpu_chunk *chunk;
+ unsigned long flags;
+ int off;
+
+ if (!ptr)
+ return;
+
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ chunk = pcpu_chunk_addr_search(addr);
+ off = addr - chunk->vm->addr;
+
+ pcpu_free_area(chunk, off);
+
+ /* if there is more than one fully free chunk, wake up the grim reaper */
+ if (chunk->free_size == pcpu_unit_size) {
+ struct pcpu_chunk *pos;
+
+ list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+ if (pos != chunk) {
+ schedule_work(&pcpu_reclaim_work);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+EXPORT_SYMBOL_GPL(free_percpu);
+
+/**
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
+ * @get_page_fn: callback to fetch page pointer
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @base_addr: mapped address, NULL for auto
+ * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ *
+ * Initialize the first percpu chunk which contains the kernel static
+ * percpu area. This function is to be called from the arch percpu area
+ * setup path. The first two parameters are mandatory. The rest are
+ * optional.
+ *
+ * @get_page_fn() should return pointer to percpu page given cpu
+ * number and page number. It should at least return enough pages to
+ * cover the static area. The returned pages for static area should
+ * have been initialized with valid data. If @unit_size is specified,
+ * it can also return pages after the static area. NULL return
+ * indicates end of pages for the cpu. Note that @get_page_fn() must
+ * return the same number of pages for all cpus.
+ *
+ * @reserved_size, if non-zero, specifies the amount of bytes to
+ * reserve after the static area in the first chunk. This reserves
+ * the first chunk such that it's available only through reserved
+ * percpu allocation. This is primarily used to serve module percpu
+ * static areas on architectures where the addressing model has
+ * limited offset range for symbol relocations to guarantee module
+ * percpu symbols fall inside the relocatable range.
+ *
+ * @unit_size, if non-negative, specifies unit size and must be
+ * aligned to PAGE_SIZE and equal to or larger than @static_size +
+ * @reserved_size + @dyn_size.
+ *
+ * @dyn_size, if non-negative, limits the number of bytes available
+ * for dynamic allocation in the first chunk. Specifying a
+ * non-negative value makes percpu leave the area beyond @static_size +
+ * @reserved_size + @dyn_size alone.
+ *
+ * Non-null @base_addr means that the caller already allocated the
+ * virtual region for the first chunk and mapped it. percpu must not
+ * mess with the chunk. Note that @base_addr with 0 @unit_size or
+ * non-NULL @populate_pte_fn doesn't make any sense.
+ *
+ * @populate_pte_fn is used to populate the pagetable. NULL means the
+ * caller already populated the pagetable.
+ *
+ * If the first chunk ends up with both reserved and dynamic areas, it
+ * is served by two chunks - one to serve the core static and reserved
+ * areas and the other for the dynamic area. They share the same vm
+ * and page map but use different area allocation maps to stay away
+ * from each other. The latter chunk is circulated in the chunk slots
+ * and available for dynamic allocation like any other chunks.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access.
+ */
+size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+ size_t static_size, size_t reserved_size,
+ ssize_t unit_size, ssize_t dyn_size,
+ void *base_addr,
+ pcpu_populate_pte_fn_t populate_pte_fn)
+{
+ static struct vm_struct first_vm;
+ static int smap[2], dmap[2];
+ struct pcpu_chunk *schunk, *dchunk = NULL;
+ unsigned int cpu;
+ int nr_pages;
+ int err, i;
+
+ /* sanity checks */
+ BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
+ ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
+ BUG_ON(!static_size);
+ if (unit_size >= 0) {
+ BUG_ON(unit_size < static_size + reserved_size +
+ (dyn_size >= 0 ? dyn_size : 0));
+ BUG_ON(unit_size & ~PAGE_MASK);
+ } else {
+ BUG_ON(dyn_size >= 0);
+ BUG_ON(base_addr);
+ }
+ BUG_ON(base_addr && populate_pte_fn);
+
+ if (unit_size >= 0)
+ pcpu_unit_pages = unit_size >> PAGE_SHIFT;
+ else
+ pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
+ PFN_UP(static_size + reserved_size));
+
+ pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
+ pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
+ pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
+ + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
+
+ if (dyn_size < 0)
+ dyn_size = pcpu_unit_size - static_size - reserved_size;
+
+ /*
+ * Allocate chunk slots. The additional last slot is for
+ * empty chunks.
+ */
+ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+ pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+ for (i = 0; i < pcpu_nr_slots; i++)
+ INIT_LIST_HEAD(&pcpu_slot[i]);
+
+ /*
+ * Initialize static chunk. If reserved_size is zero, the
+ * static chunk covers static area + dynamic allocation area
+ * in the first chunk. If reserved_size is not zero, it
+ * covers static area + reserved area (mostly used for module
+ * static percpu allocation).
+ */
+ schunk = alloc_bootmem(pcpu_chunk_struct_size);
+ INIT_LIST_HEAD(&schunk->list);
+ schunk->vm = &first_vm;
+ schunk->map = smap;
+ schunk->map_alloc = ARRAY_SIZE(smap);
+ schunk->page = schunk->page_ar;
+
+ if (reserved_size) {
+ schunk->free_size = reserved_size;
+ pcpu_reserved_chunk = schunk; /* not for dynamic alloc */
+ } else {
+ schunk->free_size = dyn_size;
+ dyn_size = 0; /* dynamic area covered */
+ }
+ schunk->contig_hint = schunk->free_size;
+
+ schunk->map[schunk->map_used++] = -static_size;
+ if (schunk->free_size)
+ schunk->map[schunk->map_used++] = schunk->free_size;
+
+ pcpu_reserved_chunk_limit = static_size + schunk->free_size;
+
+ /* init dynamic chunk if necessary */
+ if (dyn_size) {
+ dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
+ INIT_LIST_HEAD(&dchunk->list);
+ dchunk->vm = &first_vm;
+ dchunk->map = dmap;
+ dchunk->map_alloc = ARRAY_SIZE(dmap);
+ dchunk->page = schunk->page_ar; /* share page map with schunk */
+
+ dchunk->contig_hint = dchunk->free_size = dyn_size;
+ dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
+ dchunk->map[dchunk->map_used++] = dchunk->free_size;
+ }
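As a worked example, take a 64k static area, 8k of @reserved_size and 20k of @dyn_size: schunk gets map = { -64k, 8k } and serves only reserved allocations, pcpu_reserved_chunk_limit becomes 72k, and dchunk gets map = { -72k, 20k }, so the first 72k of the shared unit are permanently marked allocated from dchunk's point of view.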
+
+ /* allocate vm address */
+ first_vm.flags = VM_ALLOC;
+ first_vm.size = pcpu_chunk_size;
+
+ if (!base_addr)
+ vm_area_register_early(&first_vm, PAGE_SIZE);
+ else {
+ /*
+ * Pages already mapped. No need to remap into
+ * vmalloc area. In this case the first chunks can't
+ * be mapped or unmapped by percpu and are marked
+ * immutable.
+ */
+ first_vm.addr = base_addr;
+ schunk->immutable = true;
+ if (dchunk)
+ dchunk->immutable = true;
+ }
+
+ /* assign pages */
+ nr_pages = -1;
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < pcpu_unit_pages; i++) {
+ struct page *page = get_page_fn(cpu, i);
+
+ if (!page)
+ break;
+ *pcpu_chunk_pagep(schunk, cpu, i) = page;
+ }
+
+ BUG_ON(i < PFN_UP(static_size));
+
+ if (nr_pages < 0)
+ nr_pages = i;
+ else
+ BUG_ON(nr_pages != i);
+ }
+
+ /* map them */
+ if (populate_pte_fn) {
+ for_each_possible_cpu(cpu)
+ for (i = 0; i < nr_pages; i++)
+ populate_pte_fn(pcpu_chunk_addr(schunk,
+ cpu, i));
+
+ err = pcpu_map(schunk, 0, nr_pages);
+ if (err)
+ panic("failed to setup static percpu area, err=%d\n",
+ err);
+ }
+
+ /* link the first chunk in */
+ if (!dchunk) {
+ pcpu_chunk_relocate(schunk, -1);
+ pcpu_chunk_addr_insert(schunk);
+ } else {
+ pcpu_chunk_relocate(dchunk, -1);
+ pcpu_chunk_addr_insert(dchunk);
+ }
+
+ /* we're done */
+ pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
+ return pcpu_unit_size;
+}
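To see how the pieces fit together, a hedged sketch of an arch setup path driving pcpu_setup_first_chunk() in its simplest form, letting unit size, dynamic size and the vm area be determined automatically (pcpur_pages[], my_get_page() and my_populate_pte() are illustrative assumptions, not interfaces added by this patch):

	extern char __per_cpu_start[], __per_cpu_end[];

	static struct page **pcpur_pages;	/* assumed: filled earlier with
						   bootmem copies of the static area */

	static struct page * __init my_get_page(unsigned int cpu, int pageno)
	{
		size_t static_pages = PFN_UP(__per_cpu_end - __per_cpu_start);

		if (pageno >= static_pages)
			return NULL;		/* only the static area is backed */
		return pcpur_pages[cpu * static_pages + pageno];
	}

	static void __init my_populate_pte(unsigned long addr)
	{
		/* ensure a pte exists for addr, e.g. via the arch's early
		 * pagetable population helper */
	}

	void __init setup_per_cpu_areas(void)
	{
		size_t static_size = __per_cpu_end - __per_cpu_start;
		size_t unit_size;

		unit_size = pcpu_setup_first_chunk(my_get_page, static_size,
						   0, -1, -1, NULL,
						   my_populate_pte);
		/* the arch then points its percpu base registers at
		 * pcpu_base_addr + cpu * unit_size */
	}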
diff --git a/mm/slab.c b/mm/slab.c
index 825c606..9ec66c3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
+#include <trace/kmemtrace.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+ return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
/*
* Do not go above this order unless 0 objects fit into the slab.
*/
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags, __builtin_return_address(0));
+ void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+ obj_size(cachep), cachep->buffer_size, flags);
+
+ return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+ return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
/**
* kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
* @cachep: the cache we're checking against
@@ -3602,23 +3624,47 @@ out:
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- return __cache_alloc_node(cachep, flags, nodeid,
- __builtin_return_address(0));
+ void *ret = __cache_alloc_node(cachep, flags, nodeid,
+ __builtin_return_address(0));
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+ obj_size(cachep), cachep->buffer_size,
+ flags, nodeid);
+
+ return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
+{
+ return __cache_alloc_node(cachep, flags, nodeid,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmem_find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node(cachep, flags, node);
+ ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+ (unsigned long) caller, ret,
+ size, cachep->buffer_size, flags, node);
+
+ return ret;
}
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
{
struct kmem_cache *cachep;
+ void *ret;
/* If you want to save a few bytes .text space: replace
* __ with kmem_.
@@ -3660,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
cachep = __find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return __cache_alloc(cachep, flags, caller);
+ ret = __cache_alloc(cachep, flags, caller);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+ (unsigned long) caller, ret,
+ size, cachep->buffer_size, flags);
+
+ return ret;
}
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
debug_check_no_obj_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
local_irq_restore(flags);
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);
@@ -3729,6 +3784,8 @@ void kfree(const void *objp)
debug_check_no_obj_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
}
EXPORT_SYMBOL(kfree);
diff --git a/mm/slob.c b/mm/slob.c
index 26aa464..5961529 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
+#include <trace/kmemtrace.h>
#include <asm/atomic.h>
/*
@@ -463,6 +464,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ void *ret;
lockdep_trace_alloc(gfp);
@@ -471,21 +473,31 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
return ZERO_SIZE_PTR;
m = slob_alloc(size + align, gfp, align, node);
+
if (!m)
return NULL;
*m = size;
- return (void *)m + align;
+ ret = (void *)m + align;
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+ _RET_IP_, ret,
+ size, size + align, gfp, node);
} else {
- void *ret;
+ unsigned int order = get_order(size);
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_page(gfp | __GFP_COMP, order, node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
page->private = size;
}
- return ret;
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+ _RET_IP_, ret,
+ size, PAGE_SIZE << order, gfp, node);
}
+
+ return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
@@ -503,6 +515,8 @@ void kfree(const void *block)
slob_free(m, *m + align);
} else
put_page(&sp->page);
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
}
EXPORT_SYMBOL(kfree);
@@ -572,10 +586,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
- if (c->size < PAGE_SIZE)
+ if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
- else
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+ _RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
b = slob_new_page(flags, get_order(c->size), node);
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+ _RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
if (c->ctor)
c->ctor(b);
@@ -611,6 +634,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
} else {
__kmem_cache_free(b, c->size);
}
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
index 604da4b..816734e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
@@ -1624,18 +1625,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
- return slab_alloc(s, gfpflags, -1, _RET_IP_);
+ void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+ s->objsize, s->size, gfpflags);
+
+ return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+ return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
- return slab_alloc(s, gfpflags, node, _RET_IP_);
+ void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+ s->objsize, s->size, gfpflags, node);
+
+ return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node)
+{
+ return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
/*
* Slow path handling. This may still be called frequently since objects
* have a longer lifetime than the cpu slabs in most processing loads.
@@ -1743,6 +1772,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
page = virt_to_head_page(x);
slab_free(s, page, x, _RET_IP_);
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);
@@ -2476,7 +2507,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2538,7 +2569,7 @@ panic:
}
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
@@ -2658,8 +2689,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
+ void *ret;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2667,7 +2699,12 @@ void *__kmalloc(size_t size, gfp_t flags)
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
- return slab_alloc(s, flags, -1, _RET_IP_);
+ ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+ size, s->size, flags);
+
+ return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -2686,16 +2723,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
+ void *ret;
+
+ if (unlikely(size > SLUB_MAX_SIZE)) {
+ ret = kmalloc_large_node(size, flags, node);
- if (unlikely(size > PAGE_SIZE))
- return kmalloc_large_node(size, flags, node);
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+ _RET_IP_, ret,
+ size, PAGE_SIZE << get_order(size),
+ flags, node);
+
+ return ret;
+ }
s = get_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
- return slab_alloc(s, flags, node, _RET_IP_);
+ ret = slab_alloc(s, flags, node, _RET_IP_);
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+ size, s->size, flags, node);
+
+ return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -2754,6 +2805,8 @@ void kfree(const void *x)
return;
}
slab_free(page->slab, page, object, _RET_IP_);
+
+ kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
}
EXPORT_SYMBOL(kfree);
@@ -2987,7 +3040,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3024,7 +3077,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
kmalloc_caches[i].name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3223,8 +3276,9 @@ static struct notifier_block __cpuinitdata slab_notifier = {
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
{
struct kmem_cache *s;
+ void *ret;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
@@ -3232,15 +3286,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
- return slab_alloc(s, gfpflags, -1, caller);
+ ret = slab_alloc(s, gfpflags, -1, caller);
+
+ /* Honor the call site pointer we received. */
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+ s->size, gfpflags);
+
+ return ret;
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, unsigned long caller)
{
struct kmem_cache *s;
+ void *ret;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
@@ -3248,7 +3309,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
- return slab_alloc(s, gfpflags, node, caller);
+ ret = slab_alloc(s, gfpflags, node, caller);
+
+ /* Honor the call site pointer we received. */
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+ size, s->size, gfpflags, node);
+
+ return ret;
}
#ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 520a759..af58324 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
*
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
-static int vmap_page_range(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
{
pgd_t *pgd;
unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
if (err)
break;
} while (pgd++, addr = next, addr != end);
- flush_cache_vmap(start, end);
if (unlikely(err))
return err;
return nr;
}
+static int vmap_page_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
+{
+ int ret;
+
+ ret = vmap_page_range_noflush(start, end, prot, pages);
+ flush_cache_vmap(start, end);
+ return ret;
+}
+
static inline int is_vmalloc_or_module_addr(const void *x)
{
/*
@@ -990,6 +1000,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
}
EXPORT_SYMBOL(vm_map_ram);
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register a kernel vm area before
+ * vmalloc_init() is called. @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero. On return,
+ * @vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+ static size_t vm_init_off __initdata;
+ unsigned long addr;
+
+ addr = ALIGN(VMALLOC_START + vm_init_off, align);
+ vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+ vm->addr = (void *)addr;
+
+ vm->next = vmlist;
+ vmlist = vm;
+}
+
void __init vmalloc_init(void)
{
struct vmap_area *va;
@@ -1017,6 +1053,58 @@ void __init vmalloc_init(void)
vmap_initialized = true;
}
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+ return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+ vunmap_page_range(addr, addr + size);
+}
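The flush discipline that the _noflush variants push onto the caller, sketched the way the percpu code above applies it (start, end and pages are placeholders):

	/* unmapping: flush the vcache before, the TLB after */
	flush_cache_vunmap(start, end);
	unmap_kernel_range_noflush(start, end - start);
	flush_tlb_kernel_range(start, end);

	/* mapping: populate the ptes, then flush the vcache once */
	map_kernel_range_noflush(start, end - start, PAGE_KERNEL, pages);
	flush_cache_vmap(start, end);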
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;
@@ -1267,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
+ might_sleep();
__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
@@ -1286,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
{
struct vm_struct *area;
+ might_sleep();
+
if (count > num_physpages)
return NULL;