author    Christoph Lameter <clameter@sgi.com>	2006-01-08 01:00:52 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>	2006-01-08 20:12:42 -0800
commit    8419c3181086c86664e8246bc997afc2e4ffba4f (patch)
tree      25938e6f99bdaaffe8f6d357582eca56692b091c
parent    39743889aaf76725152f16aa90ca3c45f6d52da3 (diff)
download  op-kernel-dev-8419c3181086c86664e8246bc997afc2e4ffba4f.zip
          op-kernel-dev-8419c3181086c86664e8246bc997afc2e4ffba4f.tar.gz
[PATCH] SwapMig: CONFIG_MIGRATION fixes
Move move_to_lru(), putback_lru_pages() and isolate_lru_page() into a
section surrounded by CONFIG_MIGRATION, saving some code size for
single-processor kernels.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/swap.h |   3
-rw-r--r--  mm/vmscan.c          | 152
2 files changed, 77 insertions(+), 78 deletions(-)
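For context: code inside an #ifdef CONFIG_MIGRATION block is never seen by the compiler when the option is off, which is where the code-size saving comes from. A minimal, self-contained sketch of that pattern (a toy userspace program, not kernel code; the helper name is made up):

#include <stdio.h>

#ifdef CONFIG_MIGRATION
static int putback_count(int isolated)
{
	/* stand-in for putback_lru_pages(): reports pages put back */
	return isolated;
}
#endif

int main(void)
{
#ifdef CONFIG_MIGRATION
	printf("migration built in: put back %d pages\n", putback_count(3));
#else
	printf("migration compiled out: helper code never emitted\n");
#endif
	return 0;
}

Built with -DCONFIG_MIGRATION the helper is compiled in; without it, the function is never emitted at all, which is exactly the saving the commit message describes.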
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 117add0..997d838 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -175,10 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
extern int shrink_all_memory(int);
extern int vm_swappiness;
+#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l);
-
-#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t);
#endif
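Note that the declarations now sit under the same guard as migrate_pages(), so every caller must itself live inside a CONFIG_MIGRATION region. An alternative kernel convention, not used by this patch, is to provide no-op static inline fallbacks for the !CONFIG_MIGRATION case; a hypothetical sketch:

/* Hypothetical fallbacks -- NOT part of this commit -- that would let
 * callers compile unguarded when CONFIG_MIGRATION is off. */
#ifndef CONFIG_MIGRATION
static inline int isolate_lru_page(struct page *p)
{
	return 0;		/* report "not on LRU"; nothing to do */
}
static inline int putback_lru_pages(struct list_head *l)
{
	return 0;		/* nothing was isolated, nothing put back */
}
#endif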
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 58270ae..daed4a7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -569,6 +569,40 @@ keep:
}
#ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+ list_del(&page->lru);
+ if (PageActive(page)) {
+ /*
+ * lru_cache_add_active checks that
+ * the PG_active bit is off.
+ */
+ ClearPageActive(page);
+ lru_cache_add_active(page);
+ } else {
+ lru_cache_add(page);
+ }
+ put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+ struct page *page;
+ struct page *page2;
+ int count = 0;
+
+ list_for_each_entry_safe(page, page2, l, lru) {
+ move_to_lru(page);
+ count++;
+ }
+ return count;
+}
+
/*
* swapout a single page
* page is locked upon entry, unlocked on exit
@@ -709,6 +743,48 @@ retry_later:
return nr_failed + retry;
}
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+ lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ * 0 = page not on LRU list
+ * 1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+ int rc = 0;
+ struct zone *zone = page_zone(page);
+
+redo:
+ spin_lock_irq(&zone->lru_lock);
+ rc = __isolate_lru_page(page);
+ if (rc == 1) {
+ if (PageActive(page))
+ del_page_from_active_list(zone, page);
+ else
+ del_page_from_inactive_list(zone, page);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ if (rc == 0) {
+ /*
+ * Maybe this page is still waiting for a cpu to drain it
+ * from one of the lru lists?
+ */
+ rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+ if (rc == 0 && PageLRU(page))
+ goto redo;
+ }
+ return rc;
+}
#endif
/*
@@ -758,48 +834,6 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
return nr_taken;
}
-static void lru_add_drain_per_cpu(void *dummy)
-{
- lru_add_drain();
-}
-
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list. Do necessary cache draining if the
- * page is not on the LRU lists yet.
- *
- * Result:
- * 0 = page not on LRU list
- * 1 = page removed from LRU list and added to the specified list.
- * -ENOENT = page is being freed elsewhere.
- */
-int isolate_lru_page(struct page *page)
-{
- int rc = 0;
- struct zone *zone = page_zone(page);
-
-redo:
- spin_lock_irq(&zone->lru_lock);
- rc = __isolate_lru_page(page);
- if (rc == 1) {
- if (PageActive(page))
- del_page_from_active_list(zone, page);
- else
- del_page_from_inactive_list(zone, page);
- }
- spin_unlock_irq(&zone->lru_lock);
- if (rc == 0) {
- /*
- * Maybe this page is still waiting for a cpu to drain it
- * from one of the lru lists?
- */
- rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
- if (rc == 0 && PageLRU(page))
- goto redo;
- }
- return rc;
-}
-
/*
* shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
*/
@@ -865,40 +899,6 @@ done:
pagevec_release(&pvec);
}
-static inline void move_to_lru(struct page *page)
-{
- list_del(&page->lru);
- if (PageActive(page)) {
- /*
- * lru_cache_add_active checks that
- * the PG_active bit is off.
- */
- ClearPageActive(page);
- lru_cache_add_active(page);
- } else {
- lru_cache_add(page);
- }
- put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU
- *
- * returns the number of pages put back.
- */
-int putback_lru_pages(struct list_head *l)
-{
- struct page *page;
- struct page *page2;
- int count = 0;
-
- list_for_each_entry_safe(page, page2, l, lru) {
- move_to_lru(page);
- count++;
- }
- return count;
-}
-
/*
* This moves pages from the active list to the inactive list.
*
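For orientation, the two moved functions bracket migrate_pages(): a caller isolates pages onto a private list, attempts migration, and puts whatever is left back on the LRU. A hypothetical single-page caller (illustrative only, not from this patch; the NULL target list for swap-based migration is an assumption):

/* Hypothetical caller, illustrative only: isolate one page, try to
 * migrate it, and return any failure to the LRU. */
static void try_migrate_one(struct page *page)
{
	LIST_HEAD(pagelist);

	if (isolate_lru_page(page) != 1)
		return;			/* not on the LRU, or being freed */

	list_add_tail(&page->lru, &pagelist);
	migrate_pages(&pagelist, NULL);	/* NULL target list: assumption */
	putback_lru_pages(&pagelist);	/* re-add anything not migrated */
}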