-rw-r--r--	include/linux/mmzone.h	 8
-rw-r--r--	mm/page_alloc.c	30
-rw-r--r--	mm/page_isolation.c	26
3 files changed, 62 insertions, 2 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 98f079b..64b2c3a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -478,6 +478,14 @@ struct zone {
 	 * rarely used fields:
 	 */
 	const char		*name;
+#ifdef CONFIG_MEMORY_ISOLATION
+	/*
+	 * the number of MIGRATE_ISOLATE *pageblock*.
+	 * We need this for free page counting. Look at zone_watermark_ok_safe.
+	 * It's protected by zone->lock
+	 */
+	int			nr_pageblock_isolate;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e66359..6a29ed8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -218,6 +218,11 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
+/*
+ * NOTE:
+ * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
+ * Instead, use {un}set_pageblock_isolate.
+ */
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
@@ -1619,6 +1624,20 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
+#ifdef CONFIG_MEMORY_ISOLATION
+static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
+{
+	if (unlikely(zone->nr_pageblock_isolate))
+		return zone->nr_pageblock_isolate * pageblock_nr_pages;
+	return 0;
+}
+#else
+static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
+{
+	return 0;
+}
+#endif
+
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
@@ -1634,6 +1653,14 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 
+	/*
+	 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
+	 * it. nr_zone_isolate_freepages is never accurate so kswapd might not
+	 * sleep although it could do so. But this is more desirable for memory
+	 * hotplug than sleeping which can cause a livelock in the direct
+	 * reclaim path.
+	 */
+	free_pages -= nr_zone_isolate_freepages(z);
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 								free_pages);
 }
@@ -4398,6 +4425,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	lruvec_init(&zone->lruvec, zone);
 	zap_zone_vm_stats(zone);
 	zone->flags = 0;
+#ifdef CONFIG_MEMORY_ISOLATION
+	zone->nr_pageblock_isolate = 0;
+#endif
 	if (!size)
 		continue;
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index fb482cf..247d1f1 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,28 @@
 #include <linux/memory.h>
 #include "internal.h"
 
+/* called while holding zone->lock */
+static void set_pageblock_isolate(struct page *page)
+{
+	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
+		return;
+
+	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+	page_zone(page)->nr_pageblock_isolate++;
+}
+
+/* called while holding zone->lock */
+static void restore_pageblock_isolate(struct page *page, int migratetype)
+{
+	struct zone *zone = page_zone(page);
+	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
+		return;
+
+	BUG_ON(zone->nr_pageblock_isolate <= 0);
+	set_pageblock_migratetype(page, migratetype);
+	zone->nr_pageblock_isolate--;
+}
+
 int set_migratetype_isolate(struct page *page)
 {
 	struct zone *zone;
@@ -54,7 +76,7 @@ int set_migratetype_isolate(struct page *page)
 
 out:
 	if (!ret) {
-		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+		set_pageblock_isolate(page);
 		move_freepages_block(zone, page, MIGRATE_ISOLATE);
 	}
 
@@ -72,8 +94,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	set_pageblock_migratetype(page, migratetype);
 	move_freepages_block(zone, page, migratetype);
+	restore_pageblock_isolate(page, migratetype);
out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
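
For context, the arithmetic behind the new zone_watermark_ok_safe() correction can be sketched outside the kernel. The following is a self-contained userspace C toy model, not kernel code: the struct, helper names (toy_zone, toy_isolate_pageblock, toy_watermark_ok), and numbers are invented for illustration, but they mirror the patch's idea of counting MIGRATE_ISOLATE pageblocks per zone and subtracting a worst-case nr_zone_isolate_freepages() estimate from the free-page count before the watermark comparison.

/*
 * Toy model of the accounting introduced by this patch: a per-"zone"
 * counter of isolated pageblocks and a watermark check that subtracts a
 * worst-case estimate of the free pages hidden behind MIGRATE_ISOLATE.
 * Userspace C with illustrative numbers; not the kernel implementation.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* e.g. 2MB pageblock with 4KB pages */

struct toy_zone {
	unsigned long nr_free_pages;	/* like NR_FREE_PAGES: still counts isolated free pages */
	int nr_pageblock_isolate;	/* like zone->nr_pageblock_isolate */
};

/* mirrors the set_pageblock_isolate() bookkeeping (no locking in this toy) */
static void toy_isolate_pageblock(struct toy_zone *z)
{
	z->nr_pageblock_isolate++;
}

/* mirrors nr_zone_isolate_freepages(): a worst-case, never exact, estimate */
static unsigned long toy_isolate_freepages(const struct toy_zone *z)
{
	return (unsigned long)z->nr_pageblock_isolate * PAGEBLOCK_NR_PAGES;
}

static bool toy_watermark_ok(const struct toy_zone *z, unsigned long mark,
			     bool subtract_isolated)
{
	unsigned long free_pages = z->nr_free_pages;

	if (subtract_isolated)
		free_pages -= toy_isolate_freepages(z);
	return free_pages >= mark;
}

int main(void)
{
	/* 8 pageblocks' worth of free pages; hotplug has isolated 6 of them */
	struct toy_zone z = { .nr_free_pages = 8 * PAGEBLOCK_NR_PAGES };
	unsigned long mark = 4 * PAGEBLOCK_NR_PAGES;	/* pretend watermark */
	int i;

	for (i = 0; i < 6; i++)
		toy_isolate_pageblock(&z);

	printf("old check: %s (false 'ok': kswapd sleeps, allocators livelock)\n",
	       toy_watermark_ok(&z, mark, false) ? "ok" : "low");
	printf("new check: %s (kswapd keeps reclaiming while pageblocks are isolated)\n",
	       toy_watermark_ok(&z, mark, true) ? "ok" : "low");
	return 0;
}

As the patch's own comment notes, the estimate deliberately errs toward under-reporting free memory (every page in an isolated pageblock is assumed free), so kswapd may fail to sleep when it actually could; that is considered preferable to a livelock in the direct reclaim path during memory hotplug.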