author		Linus Torvalds <torvalds@linux-foundation.org>	2018-07-21 13:48:51 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-07-21 13:48:51 -0700
commit		3928d4f5ee37cdc523894f6e549e6aae521d8980 (patch)
tree		c328fd919e48fc8442db04d13c2ba1fe2c0f88e4 /arch/ia64/mm/init.c
parent		191a3afa98b857faf5231981ddbab66698034273 (diff)
mm: use helper functions for allocating and freeing vm_area structs
The vm_area_struct is one of the most fundamental memory management objects, but the management of it is entirely open-coded everywhere, ranging from allocation and freeing (using kmem_cache_[z]alloc and kmem_cache_free) to initializing all the fields.

We want to unify this in order to end up having some unified initialization of the vmas, and the first step to this is to at least have basic allocation functions.

Right now those functions are literally just wrappers around the kmem_cache_*() calls.  This is a purely mechanical conversion:

    # new vma:
    kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) -> vm_area_alloc()

    # copy old vma
    kmem_cache_alloc(vm_area_cachep, GFP_KERNEL) -> vm_area_dup(old)

    # free vma
    kmem_cache_free(vm_area_cachep, vma) -> vm_area_free(vma)

to the point where the old vma passed in to the vm_area_dup() function isn't even used yet (because I've left all the old manual initialization alone).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
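For reference, a minimal sketch of what these helpers amount to, going only by the description above that they are "literally just wrappers around the kmem_cache_*() calls"; the exact code in kernel/fork.c may differ slightly:

    /* Sketch only: thin wrappers around the existing slab cache, as
     * described in the commit message; not necessarily the exact code. */
    struct vm_area_struct *vm_area_alloc(void)
    {
        return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
    }

    struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
    {
        /* 'orig' is not used yet; callers still copy the fields by hand. */
        return kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    }

    void vm_area_free(struct vm_area_struct *vma)
    {
        kmem_cache_free(vm_area_cachep, vma);
    }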
Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r--	arch/ia64/mm/init.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 18278b4..3f2321b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -114,7 +114,7 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc();
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
@@ -125,7 +125,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
@@ -133,7 +133,7 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc();
if (vma) {
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
@@ -144,7 +144,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
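The arch/ia64 hunks above only exercise vm_area_alloc() and vm_area_free(); the vm_area_dup() side of the conversion happens at copy sites such as dup_mmap() in kernel/fork.c and follows the same mechanical pattern. A hedged sketch, with the surrounding variable names illustrative rather than taken from this diff:

    /* Sketch of a copy-site conversion; 'mpnt' is the vma being copied. */
    -	tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    +	tmp = vm_area_dup(mpnt);
    	if (!tmp)
    		goto fail_nomem;
    	*tmp = *mpnt;	/* manual field copy is still done by the caller for now */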