Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	17
-rw-r--r--	mm/kmemleak.c	52
-rw-r--r--	mm/memory.c	34
-rw-r--r--	mm/nommu.c	12
-rw-r--r--	mm/page_alloc.c	10
-rw-r--r--	mm/shmem.c	6
-rw-r--r--	mm/shmem_acl.c	29
-rw-r--r--	mm/slub.c	10
-rw-r--r--	mm/thrash.c	32
-rw-r--r--	mm/vmscan.c	2
10 files changed, 91 insertions, 113 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3..d0351e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep, int write_access)
+			unsigned long address, pte_t *ptep, unsigned int flags)
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@ retry:
 	 * any allocations necessary to record that reservation occur outside
 	 * the spinlock.
 	 */
-	if (write_access && !(vma->vm_flags & VM_SHARED))
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto backout_unlocked;
@@ -2072,7 +2072,7 @@ retry:
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
 
-	if (write_access && !(vma->vm_flags & VM_SHARED)) {
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
 	}
@@ -2091,7 +2091,7 @@ backout_unlocked:
 }
 
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+			unsigned long address, unsigned int flags)
 {
 	pte_t *ptep;
 	pte_t entry;
@@ -2112,7 +2112,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	mutex_lock(&hugetlb_instantiation_mutex);
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
-		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
 		goto out_mutex;
 	}
 
@@ -2126,7 +2126,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * page now as it is used to determine if a reservation has been
 	 * consumed.
 	 */
-	if (write_access && !pte_write(entry)) {
+	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto out_mutex;
@@ -2143,7 +2143,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_page_table_lock;
 
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
@@ -2152,7 +2152,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+						flags & FAULT_FLAG_WRITE))
 		update_mmu_cache(vma, address, entry);
 
 out_page_table_lock:
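A note on the hugetlb.c hunks above: they track the same conversion made in the generic fault path, where callers now pass a FAULT_FLAG_* bitmask instead of a write_access boolean, so additional fault flags can be added later without touching every signature. A minimal userspace sketch of the calling convention (the flag value below is illustrative only; the real definition lives in include/linux/mm.h):

#include <stdio.h>

#define FAULT_FLAG_WRITE 0x01 /* illustrative value, not the kernel's header */

/* stand-in for hugetlb_fault(): tests a bit instead of taking a bool */
static int fake_hugetlb_fault(unsigned long address, unsigned int flags)
{
        if (flags & FAULT_FLAG_WRITE)
                printf("write fault at 0x%lx\n", address);
        else
                printf("read fault at 0x%lx\n", address);
        return 0;
}

int main(void)
{
        fake_hugetlb_fault(0x1000, FAULT_FLAG_WRITE); /* was: write_access = 1 */
        fake_hugetlb_fault(0x2000, 0);                /* was: write_access = 0 */
        return 0;
}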
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ec759b6..c96f2c8 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -61,6 +61,8 @@
  * structure.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -311,7 +313,7 @@ static int unreferenced_object(struct kmemleak_object *object)
 
 static void print_referenced(struct kmemleak_object *object)
 {
-	pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
+	pr_info("referenced object 0x%08lx (size %zu)\n",
 		object->pointer, object->size);
 }
 
@@ -320,7 +322,7 @@ static void print_unreferenced(struct seq_file *seq,
 {
 	int i;
 
-	print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
+	print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
 		     object->pointer, object->size);
 	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
 		     object->comm, object->pid, object->jiffies);
@@ -344,7 +346,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	trace.nr_entries = object->trace_len;
 	trace.entries = object->trace;
 
-	pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
+	pr_notice("Object 0x%08lx (size %zu):\n",
 		  object->tree_node.start, object->size);
 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 		  object->comm, object->pid, object->jiffies);
@@ -372,7 +374,7 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 		object = prio_tree_entry(node, struct kmemleak_object,
 					 tree_node);
 		if (!alias && object->pointer != ptr) {
-			kmemleak_warn("kmemleak: Found object by alias");
+			kmemleak_warn("Found object by alias");
 			object = NULL;
 		}
 	} else
@@ -467,8 +469,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 
 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!object) {
-		kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
-			      "structure\n");
+		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
 		return;
 	}
 
@@ -527,8 +528,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	if (node != &object->tree_node) {
 		unsigned long flags;
 
-		kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
-			      "search tree (already existing)\n", ptr);
+		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
+			      "(already existing)\n", ptr);
 		object = lookup_object(ptr, 1);
 		spin_lock_irqsave(&object->lock, flags);
 		dump_object_info(object);
@@ -553,7 +554,7 @@ static void delete_object(unsigned long ptr)
 	write_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
+		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 			      ptr);
 		write_unlock_irqrestore(&kmemleak_lock, flags);
 		return;
 	}
@@ -588,8 +589,7 @@ static void make_gray_object(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
-			      ptr);
+		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -610,8 +610,7 @@ static void make_black_object(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
-			      ptr);
+		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -634,21 +633,20 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Adding scan area to unknown "
-			      "object at 0x%08lx\n", ptr);
+		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
+			      ptr);
 		return;
 	}
 
 	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!area) {
-		kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
+		kmemleak_warn("Cannot allocate a scan area\n");
 		goto out;
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
 	if (offset + length > object->size) {
-		kmemleak_warn("kmemleak: Scan area larger than object "
-			      "0x%08lx\n", ptr);
+		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
 		goto out_unlock;
@@ -677,8 +675,7 @@ static void object_no_scan(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Not scanning unknown object at "
-			      "0x%08lx\n", ptr);
+		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -699,7 +696,7 @@ static void log_early(int op_type, const void *ptr, size_t size,
 	struct early_log *log;
 
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		kmemleak_stop("kmemleak: Early log buffer exceeded\n");
+		kmemleak_stop("Early log buffer exceeded\n");
 		return;
 	}
 
@@ -966,7 +963,7 @@ static void kmemleak_scan(void)
 		 * 1 reference to any object at this point.
 		 */
 		if (atomic_read(&object->use_count) > 1) {
-			pr_debug("kmemleak: object->use_count = %d\n",
+			pr_debug("object->use_count = %d\n",
 				 atomic_read(&object->use_count));
 			dump_object_info(object);
 		}
@@ -1062,7 +1059,7 @@ static int kmemleak_scan_thread(void *arg)
 {
 	static int first_run = 1;
 
-	pr_info("kmemleak: Automatic memory scanning thread started\n");
+	pr_info("Automatic memory scanning thread started\n");
 
 	/*
 	 * Wait before the first scan to allow the system to fully initialize.
@@ -1108,7 +1105,7 @@ static int kmemleak_scan_thread(void *arg)
 		timeout = schedule_timeout_interruptible(timeout);
 	}
 
-	pr_info("kmemleak: Automatic memory scanning thread ended\n");
+	pr_info("Automatic memory scanning thread ended\n");
 
 	return 0;
 }
@@ -1123,7 +1120,7 @@ void start_scan_thread(void)
 		return;
 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
 	if (IS_ERR(scan_thread)) {
-		pr_warning("kmemleak: Failed to create the scan thread\n");
+		pr_warning("Failed to create the scan thread\n");
 		scan_thread = NULL;
 	}
 }
@@ -1367,7 +1364,7 @@ static void kmemleak_cleanup(void)
 	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
 				     "kmemleak-clean");
 	if (IS_ERR(cleanup_thread))
-		pr_warning("kmemleak: Failed to create the clean-up thread\n");
+		pr_warning("Failed to create the clean-up thread\n");
 }
 
 /*
@@ -1488,8 +1485,7 @@ static int __init kmemleak_late_init(void)
 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
 				     &kmemleak_fops);
 	if (!dentry)
-		pr_warning("kmemleak: Failed to create the debugfs kmemleak "
-			   "file\n");
+		pr_warning("Failed to create the debugfs kmemleak file\n");
 	mutex_lock(&kmemleak_mutex);
 	start_scan_thread();
 	mutex_unlock(&kmemleak_mutex);
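All of the kmemleak.c hunks follow from the single pr_fmt() definition added at the top of the file: once pr_fmt() is defined before the includes, every pr_info()/pr_warning()/pr_debug() call is prefixed automatically, so the hand-written "kmemleak: " prefixes can be dropped. A minimal userspace sketch of the same trick (the kernel's printk.h does the equivalent expansion; KBUILD_MODNAME is replaced here by a hard-coded string for illustration):

#include <stdio.h>

/* must be defined before any macro that uses pr_fmt() */
#define pr_fmt(fmt) "kmemleak: " fmt

/* stand-in for the kernel's pr_info(), which expands to printk(pr_fmt(fmt), ...) */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        /* prints "kmemleak: Automatic memory scanning thread started" */
        pr_info("Automatic memory scanning thread started\n");
        return 0;
}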
diff --git a/mm/memory.c b/mm/memory.c
index 98bcb90..6521619 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     unsigned long start, int nr_pages, int flags,
+		     struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-	if (len <= 0)
+	if (nr_pages <= 0)
 		return 0;
 	/*
 	 * Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = gate_vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
+			nr_pages--;
 			continue;
 		}
 
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &len, i, write);
+					&start, &nr_pages, i, write);
 			continue;
 		}
 
@@ -1311,8 +1311,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
 
-			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
-			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
+			ret = handle_mm_fault(mm, vma, start,
+					(foll_flags & FOLL_WRITE) ?
+					 FAULT_FLAG_WRITE : 0);
+
 			if (ret & VM_FAULT_ERROR) {
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
@@ -1355,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
-		} while (len && start < vma->vm_end);
-	} while (len);
+			nr_pages--;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
 	return i;
 }
 
@@ -1366,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:	task_struct of target task
  * @mm:		mm_struct of target mm
  * @start:	starting user address
- * @len:	number of pages from start to pin
+ * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
  * @force:	whether to force write access even if user mapping is
  *		readonly. This will result in the page being COWed even
@@ -1378,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *		Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
@@ -1412,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -1422,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -2517,7 +2517,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
-		grab_swap_token(); /* Contend for token _before_ read-in */
+		grab_swap_token(mm); /* Contend for token _before_ read-in */
 		page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!page) {
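The key change in the memory.c hunks, beyond the len to nr_pages rename, is that __get_user_pages() no longer relies on FOLL_WRITE happening to share a bit value with FAULT_FLAG_WRITE; the follow_page() flag is translated explicitly before calling handle_mm_fault(). A small userspace sketch of that decoupling, with illustrative flag values (the real definitions are in include/linux/mm.h):

#include <stdio.h>

/* illustrative values only; the point is that the two namespaces are independent */
#define FOLL_WRITE       0x01 /* follow_page()/GUP flag */
#define FAULT_FLAG_WRITE 0x01 /* handle_mm_fault() flag */

/* stand-in for handle_mm_fault(): only understands FAULT_FLAG_* bits */
static int fake_handle_mm_fault(unsigned long address, unsigned int flags)
{
        printf("fault at 0x%lx, %s\n", address,
               (flags & FAULT_FLAG_WRITE) ? "write" : "read");
        return 0;
}

int main(void)
{
        unsigned int foll_flags = FOLL_WRITE;

        /* explicit translation instead of passing foll_flags through directly */
        fake_handle_mm_fault(0x1000,
                             (foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
        return 0;
}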
diff --git a/mm/nommu.c b/mm/nommu.c
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     unsigned long start, int nr_pages, int flags,
+		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
+	unsigned long start, int nr_pages, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30d5093..5d714f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 		 * properly detect and handle allocation failures.
 		 *
 		 * We most definitely don't want callers attempting to
-		 * allocate greater than single-page units with
+		 * allocate greater than order-1 page units with
 		 * __GFP_NOFAIL.
 		 */
-		WARN_ON_ONCE(order > 0);
+		WARN_ON_ONCE(order > 1);
 	}
 	spin_lock_irqsave(&zone->lock, flags);
 	page = __rmqueue(zone, order, migratetype);
@@ -3026,7 +3026,7 @@ bad:
 		if (dzone == zone)
 			break;
 		kfree(zone_pcp(dzone, cpu));
-		zone_pcp(dzone, cpu) = NULL;
+		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
 	}
 	return -ENOMEM;
 }
@@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(int cpu)
 		/* Free per_cpu_pageset if it is slab allocated */
 		if (pset != &boot_pageset[cpu])
 			kfree(pset);
-		zone_pcp(zone, cpu) = NULL;
+		zone_pcp(zone, cpu) = &boot_pageset[cpu];
 	}
 }
 
@@ -4659,7 +4659,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
 	if (!write || (ret == -EINVAL))
 		return ret;
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		for_each_online_cpu(cpu) {
 			unsigned long high;
 			high = zone->present_pages / percpu_pagelist_fraction;
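Two independent page_alloc.c fixes sit above: the __GFP_NOFAIL sanity check now warns only for allocations larger than order 1, and per-cpu pagesets are pointed back at the static boot_pageset instead of being left NULL, so later accesses cannot dereference a NULL pointer. A userspace sketch of just the warning logic, with made-up names standing in for the real gfp machinery:

#include <stdio.h>

#define GFP_NOFAIL 0x800u /* illustrative stand-in for __GFP_NOFAIL */

/* sketch of the check done in the page allocator before the locked fast path */
static void check_nofail_order(unsigned int gfp_mask, unsigned int order)
{
        /*
         * __GFP_NOFAIL callers may legitimately need order-0 or order-1
         * pages; anything larger is considered worth warning about.
         */
        if ((gfp_mask & GFP_NOFAIL) && order > 1)
                fprintf(stderr, "WARNING: order-%u nofail allocation\n", order);
}

int main(void)
{
        check_nofail_order(GFP_NOFAIL, 1); /* silent after this patch */
        check_nofail_order(GFP_NOFAIL, 2); /* still warns */
        return 0;
}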
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
 		spin_lock_init(&info->lock);
 		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
+		cache_no_acl(inode);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -2388,7 +2389,6 @@ static void shmem_destroy_inode(struct inode *inode)
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	shmem_acl_destroy_inode(inode);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
@@ -2397,10 +2397,6 @@ static void init_once(void *foo)
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
 	inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->i_acl = NULL;
-	p->i_default_acl = NULL;
-#endif
 }
 
 static int init_inodecache(void)
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 8e5aadd..606a8e7 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -22,11 +22,11 @@ shmem_get_acl(struct inode *inode, int type)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 	case ACL_TYPE_ACCESS:
-		acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
+		acl = posix_acl_dup(inode->i_acl);
 		break;
 	case ACL_TYPE_DEFAULT:
-		acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
+		acl = posix_acl_dup(inode->i_default_acl);
 		break;
 	}
 	spin_unlock(&inode->i_lock);
 
@@ -45,13 +45,13 @@ shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 	case ACL_TYPE_ACCESS:
-		free = SHMEM_I(inode)->i_acl;
-		SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
+		free = inode->i_acl;
+		inode->i_acl = posix_acl_dup(acl);
 		break;
 	case ACL_TYPE_DEFAULT:
-		free = SHMEM_I(inode)->i_default_acl;
-		SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
+		free = inode->i_default_acl;
+		inode->i_default_acl = posix_acl_dup(acl);
 		break;
 	}
 	spin_unlock(&inode->i_lock);
 
@@ -155,23 +155,6 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
 }
 
 /**
- * shmem_acl_destroy_inode - destroy acls hanging off the in-memory inode
- *
- * This is done before destroying the actual inode.
- */
-
-void
-shmem_acl_destroy_inode(struct inode *inode)
-{
-	if (SHMEM_I(inode)->i_acl)
-		posix_acl_release(SHMEM_I(inode)->i_acl);
-	SHMEM_I(inode)->i_acl = NULL;
-	if (SHMEM_I(inode)->i_default_acl)
-		posix_acl_release(SHMEM_I(inode)->i_default_acl);
-	SHMEM_I(inode)->i_default_acl = NULL;
-}
-
-/**
  * shmem_check_acl - check_acl() callback for generic_permission()
  */
 static int
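The shmem.c and shmem_acl.c hunks move the cached ACL pointers from the shmem-private inode into the generic inode fields (inode->i_acl and inode->i_default_acl), initialised with cache_no_acl(); releasing them becomes the VFS's job, which is why shmem_acl_destroy_inode() disappears. A rough userspace sketch of the cache-and-replace pattern only (the types and helpers below are stand-ins, not the kernel API):

#include <stdlib.h>
#include <pthread.h>

/* minimal stand-ins for struct posix_acl and the generic inode fields */
struct posix_acl { int refcount; };

struct inode {
        pthread_mutex_t i_lock;
        struct posix_acl *i_acl;
        struct posix_acl *i_default_acl;
};

static void acl_release(struct posix_acl *acl)
{
        if (acl && --acl->refcount == 0)
                free(acl);
}

/* swap the cached access ACL under the inode lock and drop the old one */
static void set_cached_acl(struct inode *inode, struct posix_acl *acl)
{
        struct posix_acl *old;

        pthread_mutex_lock(&inode->i_lock);
        old = inode->i_acl;
        inode->i_acl = acl; /* caller transfers its reference */
        pthread_mutex_unlock(&inode->i_lock);
        acl_release(old);
}

int main(void)
{
        struct inode inode = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        struct posix_acl *acl = malloc(sizeof(*acl));

        if (acl) {
                acl->refcount = 1;
                set_cached_acl(&inode, acl);  /* cached on the inode */
        }
        set_cached_acl(&inode, NULL);         /* old ACL released here */
        return 0;
}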
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
diff --git a/mm/thrash.c b/mm/thrash.c
index c4c5205..2372d4e 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -26,47 +26,45 @@ static DEFINE_SPINLOCK(swap_token_lock);
 struct mm_struct *swap_token_mm;
 static unsigned int global_faults;
 
-void grab_swap_token(void)
+void grab_swap_token(struct mm_struct *mm)
 {
 	int current_interval;
 
 	global_faults++;
 
-	current_interval = global_faults - current->mm->faultstamp;
+	current_interval = global_faults - mm->faultstamp;
 
 	if (!spin_trylock(&swap_token_lock))
 		return;
 
 	/* First come first served */
 	if (swap_token_mm == NULL) {
-		current->mm->token_priority = current->mm->token_priority + 2;
-		swap_token_mm = current->mm;
+		mm->token_priority = mm->token_priority + 2;
+		swap_token_mm = mm;
 		goto out;
 	}
 
-	if (current->mm != swap_token_mm) {
-		if (current_interval < current->mm->last_interval)
-			current->mm->token_priority++;
+	if (mm != swap_token_mm) {
+		if (current_interval < mm->last_interval)
+			mm->token_priority++;
 		else {
-			if (likely(current->mm->token_priority > 0))
-				current->mm->token_priority--;
+			if (likely(mm->token_priority > 0))
+				mm->token_priority--;
 		}
 		/* Check if we deserve the token */
-		if (current->mm->token_priority >
-				swap_token_mm->token_priority) {
-			current->mm->token_priority += 2;
-			swap_token_mm = current->mm;
+		if (mm->token_priority > swap_token_mm->token_priority) {
+			mm->token_priority += 2;
+			swap_token_mm = mm;
 		}
 	} else {
 		/* Token holder came in again! */
-		current->mm->token_priority += 2;
+		mm->token_priority += 2;
 	}
 
 out:
-	current->mm->faultstamp = global_faults;
-	current->mm->last_interval = current_interval;
+	mm->faultstamp = global_faults;
+	mm->last_interval = current_interval;
 	spin_unlock(&swap_token_lock);
-	return;
 }
 
 /* Called on process exit. */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8fa2d9..5415526 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				continue;
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
-				mem_cgroup_del_lru(page);
+				mem_cgroup_del_lru(cursor_page);
 				nr_taken++;
 				scan++;
 			}
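The slub.c hunk above is the opportunistic high-order pattern: attempt the preferred order with __GFP_NORETRY | __GFP_NOWARN and with __GFP_NOFAIL masked out, and only fall back to the minimum order if that attempt fails. A userspace sketch of the same fallback shape (alloc_order() is a made-up helper standing in for alloc_slab_page()):

#include <stdlib.h>
#include <stdio.h>

/* made-up helper standing in for alloc_slab_page(): may fail for big orders */
static void *alloc_order(unsigned int order)
{
        if (order > 3) /* pretend memory is too fragmented for large blocks */
                return NULL;
        return malloc((size_t)4096 << order);
}

static void *allocate_slab_like(unsigned int preferred, unsigned int minimum)
{
        /* opportunistic attempt: allowed to fail quietly */
        void *page = alloc_order(preferred);

        if (!page) {
                /* fall back to the smallest order that still works */
                page = alloc_order(minimum);
                if (page)
                        fprintf(stderr, "fell back to order %u\n", minimum);
        }
        return page;
}

int main(void)
{
        void *p = allocate_slab_like(5, 1);

        free(p);
        return p ? 0 : 1;
}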