Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c    4
-rw-r--r--  mm/fadvise.c   12
-rw-r--r--  mm/memory.c     8
-rw-r--r--  mm/mmzone.c     6
-rw-r--r--  mm/nommu.c      2
-rw-r--r--  mm/slab.c      74
-rw-r--r--  mm/vmalloc.c    7
-rw-r--r--  mm/vmstat.c     1
8 files changed, 59 insertions, 55 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d213fed..50353e0 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -29,9 +29,7 @@
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
-EXPORT_SYMBOL(max_pfn);		/* This is exported so
-				 * dma_get_required_mask(), which uses
-				 * it, can be an inline function */
+EXPORT_UNUSED_SYMBOL(max_pfn); /* June 2006 */
 
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 0a03357..60a5d55 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -23,18 +23,6 @@
 /*
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
- *
- * LINUX_FADV_ASYNC_WRITE: start async writeout of any dirty pages between file
- * offsets `offset' and `offset+len' inclusive. Any pages which are currently
- * under writeout are skipped, whether or not they are dirty.
- *
- * LINUX_FADV_WRITE_WAIT: wait upon writeout of any dirty pages between file
- * offsets `offset' and `offset+len'.
- *
- * By combining these two operations the application may do several things:
- *
- * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
- *
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
diff --git a/mm/memory.c b/mm/memory.c
index c1e14c9..109e986 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,6 +47,7 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
+#include <linux/delayacct.h>
 #include <linux/init.h>
 
 #include <asm/pgalloc.h>
@@ -1549,9 +1550,9 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		lazy_mmu_prot_update(entry);
 		ptep_establish(vma, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
@@ -1853,7 +1854,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
 	return 0;
 }
-EXPORT_SYMBOL(vmtruncate_range);
+EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */
 
 /*
  * Primitive swap readahead code. We simply read an aligned block of
@@ -1934,6 +1935,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto out;
 	}
+	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1946,6 +1948,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 			if (likely(pte_same(*page_table, orig_pte)))
 				ret = VM_FAULT_OOM;
+			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 			goto unlock;
 		}
 
@@ -1955,6 +1958,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		grab_swap_token();
 	}
 
+	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	mark_page_accessed(page);
 	lock_page(page);
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 0959ee1..febea1c 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,7 +14,7 @@ struct pglist_data *first_online_pgdat(void)
 	return NODE_DATA(first_online_node);
 }
-EXPORT_SYMBOL(first_online_pgdat);
+EXPORT_UNUSED_SYMBOL(first_online_pgdat); /* June 2006 */
 
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
@@ -24,7 +24,7 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 	if (nid == MAX_NUMNODES)
 		return NULL;
 	return NODE_DATA(nid);
 }
-EXPORT_SYMBOL(next_online_pgdat);
+EXPORT_UNUSED_SYMBOL(next_online_pgdat); /* June 2006 */
 
 /*
@@ -45,5 +45,5 @@ struct zone *next_zone(struct zone *zone)
 	}
 	return zone;
 }
-EXPORT_SYMBOL(next_zone);
+EXPORT_UNUSED_SYMBOL(next_zone); /* June 2006 */
diff --git a/mm/nommu.c b/mm/nommu.c
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1070,6 +1070,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
 	return 0;
 }
+EXPORT_SYMBOL(remap_pfn_range);
 
 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
@@ -1090,6 +1091,7 @@ void unmap_mapping_range(struct address_space *mapping,
 			 int even_cows)
 {
 }
+EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+	int q;
+
+	for (q = 0; q < MAX_NUMNODES; q++) {
+		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+			continue;
+		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+				  &on_slab_key);
+	}
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1021,8 +1052,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1040,7 +1070,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock_nested(&alien->lock, nesting);
+		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1069,8 +1099,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	return 0;
 }
@@ -1393,6 +1422,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
+		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1760,8 +1790,6 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
-
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1785,17 +1813,8 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep)) {
-			unsigned long flags;
-
-			/*
-			 * lockdep: we may nest inside an already held
-			 * ac->lock, so pass in a nesting flag:
-			 */
-			local_irq_save(flags);
-			__cache_free(cachep->slabp_cache, slabp, 1);
-			local_irq_restore(flags);
-		}
+		if (OFF_SLAB(cachep))
+			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
 }
@@ -3100,16 +3119,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
-				/*
-				 * It is safe to drop the lock. The slab is
-				 * no longer linked to the cache. cachep
-				 * cannot disappear - we are using it and
-				 * all destruction of caches must be
-				 * serialized properly by the user.
-				 */
-				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
-				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3135,7 +3145,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
+	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3178,14 +3188,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp, nesting))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3424,7 +3434,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp, 0);
+	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3449,7 +3459,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp, 0);
+	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7b45079..266162d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 		}
 
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+		if (area->flags & VM_VPAGES)
 			vfree(area->pages);
 		else
 			kfree(area->pages);
@@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 73b83d6..dfdf241 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -81,6 +81,7 @@ void all_vm_events(unsigned long *ret)
 {
 	sum_vm_events(ret, &cpu_online_map);
 }
+EXPORT_SYMBOL_GPL(all_vm_events);
 
 #ifdef CONFIG_HOTPLUG
 /*