From 347ce434d57da80fd5809c0c836f206a50999c26 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 30 Jun 2006 01:55:35 -0700
Subject: [PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter

Currently a single atomic variable is used to establish the size of the
page cache in the whole machine.  The zoned VM counters have the same
method of implementation as the nr_pagecache code but also allow the
determination of the pagecache size per zone.

Remove the special implementation for nr_pagecache and make it a zoned
counter named NR_FILE_PAGES.

Updates of the page cache counters are always performed with interrupts
off.  We can therefore use the __ variant here.

Signed-off-by: Christoph Lameter
Cc: Trond Myklebust
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h  |  2 +-
 include/linux/pagemap.h | 45 ---------------------------------------------
 2 files changed, 1 insertion(+), 46 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index eb42c12..08be91e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -49,7 +49,7 @@ struct zone_padding {
 enum zone_stat_item {
 	NR_FILE_MAPPED,	/* mapped into pagetables.
 			   only modified from process context */
-
+	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7..0a2f5d2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
--
cgit v1.1
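
The diff above is limited to the include/linux headers, so the converted call
sites do not appear here. The following is a minimal sketch of what the
conversion looks like at a typical call site, assuming the zone statistics
API introduced earlier in this patch series (__inc_zone_page_state(),
__dec_zone_page_state(), global_page_state()); the file name and exact call
sites named in the comments are illustrative, not part of this patch:

	/* sketch only -- assumed call sites, e.g. in mm/filemap.c */

	/* adding a page to the page cache: mapping->tree_lock is held
	 * with interrupts disabled, so the cheaper non-atomic __ variant
	 * is safe, as the changelog notes */
	__inc_zone_page_state(page, NR_FILE_PAGES);

	/* removing a page from the page cache, same locking context */
	__dec_zone_page_state(page, NR_FILE_PAGES);

	/* readers that used get_page_cache_size() instead sum the
	 * per-zone counters through the global view */
	unsigned long pagecache_pages = global_page_state(NR_FILE_PAGES);

Because the per-zone counters are themselves maintained with per-CPU
differentials folded into a global array, the approximate-accounting scheme
that pagecache_acct() implemented by hand is no longer needed, and the
counter additionally becomes available per zone.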