Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b1c39e6..6877e22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -525,32 +525,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 					struct per_cpu_pages *pcp)
 {
 	int migratetype = 0;
+	int batch_free = 0;
 
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count--) {
+	while (count) {
 		struct page *page;
 		struct list_head *list;
 
 		/*
-		 * Remove pages from lists in a round-robin fashion. This spinning
-		 * around potentially empty lists is bloody awful, alternatives that
-		 * don't suck are welcome
+		 * Remove pages from lists in a round-robin fashion. A
+		 * batch_free count is maintained that is incremented when an
+		 * empty list is encountered. This is so more pages are freed
+		 * off fuller lists instead of spinning excessively around empty
+		 * lists
 		 */
 		do {
+			batch_free++;
 			if (++migratetype == MIGRATE_PCPTYPES)
 				migratetype = 0;
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
-		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_one_page list manipulates */
-		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, 0, migratetype);
-		__free_one_page(page, zone, 0, migratetype);
+		do {
+			page = list_entry(list->prev, struct page, lru);
+			/* must delete as __free_one_page list manipulates */
+			list_del(&page->lru);
+			__free_one_page(page, zone, 0, migratetype);
+			trace_mm_page_pcpu_drain(page, 0, migratetype);
+		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
 }
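
The comment introduced by this patch describes the new drain policy: walk the pcp free lists round-robin, let batch_free grow by one for every list inspected, then free up to batch_free pages from the first non-empty list, so fuller lists are drained in larger batches instead of the loop spinning once per page around empty lists. Below is a minimal userspace sketch of that control flow, not kernel code: the per-cpu lists are modelled as plain counters, and MIGRATE_PCPTYPES, the list sizes and count are made-up illustrative values.

/*
 * Standalone sketch (not the kernel function): simulates the
 * round-robin, batch_free-bounded drain performed by the new loop.
 * The pcp lists are plain counters here; the values are illustrative.
 * As in the kernel, count must not exceed the total number of pages
 * on the lists, otherwise the selection loop would spin forever.
 */
#include <stdio.h>

#define MIGRATE_PCPTYPES 3

int main(void)
{
	int list_len[MIGRATE_PCPTYPES] = { 8, 2, 0 };	/* pages per pcp list */
	int count = 6;					/* pages to free */
	int migratetype = 0;
	int batch_free = 0;

	while (count) {
		/* advance round-robin; batch_free grows for each list inspected */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
		} while (list_len[migratetype] == 0);

		/* free up to batch_free pages from this (non-empty) list */
		do {
			list_len[migratetype]--;
			printf("freed a page from list %d\n", migratetype);
		} while (--count && --batch_free && list_len[migratetype]);
	}
	return 0;
}

Running the sketch shows the intended effect: whenever the walk has to skip the empty list 2, batch_free reaches 2 and the fullest list (list 0) gives up two pages in one pass, rather than one page per trip around the lists.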