author    David S. Miller <davem@davemloft.net>  2011-12-23 17:13:56 -0500
committer David S. Miller <davem@davemloft.net>  2011-12-23 17:13:56 -0500
commit    abb434cb0539fb355c1c921f8fd761efbbac3462 (patch)
tree      24a7d99ec161f8fd4dc9ff03c9c4cc93be883ce6 /mm
parent    2494654d4890316e7340fb8b3458daad0474a1b9 (diff)
parent    6350323ad8def2ac00d77cdee3b79c9b9fba75c4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bluetooth/l2cap_core.c

Just two overlapping changes: one added an initialization of a local
variable, and the other added a new local variable.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        |  13
-rw-r--r--  mm/huge_memory.c    |  16
-rw-r--r--  mm/hugetlb.c        |   1
-rw-r--r--  mm/memcontrol.c     |   3
-rw-r--r--  mm/migrate.c        |   2
-rw-r--r--  mm/oom_kill.c       |   2
-rw-r--r--  mm/page-writeback.c |  32
-rw-r--r--  mm/page_alloc.c     |  10
-rw-r--r--  mm/percpu.c         |   6
-rw-r--r--  mm/vmalloc.c        |   4
-rw-r--r--  mm/vmscan.c         |  26
11 files changed, 70 insertions(+), 45 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2..5f0a3c9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@ repeat:
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+ err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
@@ -2407,7 +2404,6 @@ static ssize_t generic_perform_write(struct file *file,
iov_iter_count(i));
again:
-
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@ again:
written += copied;
balance_dirty_pages_ratelimited(mapping);
-
+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
} while (iov_iter_count(i));
return written ? written : status;
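
The second filemap.c hunk makes the write loop abort with -EINTR once a fatal
signal is pending, so a killed task stops dirtying pages promptly instead of
draining the whole iov first. A minimal userspace sketch of that loop shape
(the signal flag and chunk copy are hypothetical stand-ins, not the kernel API):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t fatal_pending; /* stands in for fatal_signal_pending() */

static void on_sigint(int sig) { (void)sig; fatal_pending = 1; }

int main(void)
{
	size_t remaining = 1 << 20, written = 0;
	int status = 0;

	signal(SIGINT, on_sigint);
	do {
		size_t chunk = remaining < 4096 ? remaining : 4096;
		/* ... copy one chunk, as generic_perform_write() copies one page ... */
		written += chunk;
		remaining -= chunk;
		if (fatal_pending) {	/* mirrors the new fatal_signal_pending() check */
			status = -1;	/* the kernel returns -EINTR here */
			break;
		}
	} while (remaining);

	printf("written=%zu status=%d\n", written, status);
	return 0;
}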
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4298aba..36b3d98 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
static void khugepaged_alloc_sleep(void)
{
- DEFINE_WAIT(wait);
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_alloc_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
- DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_scan_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
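
Both khugepaged sleeps now go through wait_event_freezable_timeout(khugepaged_wait,
false, timeout): with a constant-false condition it simply sleeps for the full
timeout, but unlike the open-coded add_wait_queue()/schedule_timeout_interruptible()
pair it cooperates with the freezer during suspend. A rough userspace analogue of
a timed sleep on a wait queue, using a condition variable (freezer semantics
omitted; the helper name is illustrative):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Sleep up to `ms` milliseconds, waking early if `cond` is signalled --
 * roughly the shape of wait_event_freezable_timeout(wq, false, t). */
static void sleep_on_queue(long ms)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += ms / 1000;
	deadline.tv_nsec += (ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}
	pthread_mutex_lock(&lock);
	pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	sleep_on_queue(100);	/* like khugepaged_alloc_sleep() with a 100ms period */
	puts("slept");
	return 0;
}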
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb28a5f..73f17c0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8cdc915..94da8ee 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4995,9 +4995,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int cpu;
enable_swap_cgroup();
parent = NULL;
- root_mem_cgroup = memcg;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
&per_cpu(memcg_stock, cpu);
@@ -5036,7 +5036,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &memcg->css;
free_out:
__mem_cgroup_free(memcg);
- root_mem_cgroup = NULL;
return ERR_PTR(error);
}
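
The reordering here follows the usual "publish last" rule for error paths: the
global root_mem_cgroup is only set once every fallible init step has succeeded,
so the free_out path no longer needs to undo it. A generic sketch of the pattern
(all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct cgroup { int dummy; };

static struct cgroup *root_cgroup;	/* global, visible to other code */

static int tree_init(void) { return 0; /* pretend init; may fail */ }

static struct cgroup *cgroup_create(void)
{
	struct cgroup *cg = malloc(sizeof(*cg));

	if (!cg)
		return NULL;
	if (tree_init())
		goto free_out;	/* nothing published yet: no rollback needed */
	root_cgroup = cg;	/* publish only after all fallible steps */
	return cg;

free_out:
	free(cg);		/* error path never touches root_cgroup */
	return NULL;
}

int main(void)
{
	printf("created: %p\n", (void *)cgroup_create());
	return 0;
}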
diff --git a/mm/migrate.c b/mm/migrate.c
index 578e291..177aca4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
-out:
unlock_page(hpage);
+out:
if (rc != -EAGAIN) {
list_del(&hpage->lru);
put_page(hpage);
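
Moving the out: label below unlock_page() means error paths that jump to out
before the page was locked no longer unlock a page they never locked; only the
straight-line path that took the lock releases it. The classic goto-cleanup
shape, sketched in plain C (the lock and function names are hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int process(int can_lock)
{
	int rc = -1;

	if (!can_lock)
		goto out;	/* lock never taken: skip the unlock */

	pthread_mutex_lock(&page_lock);
	/* ... work on the locked page ... */
	rc = 0;
	pthread_mutex_unlock(&page_lock);	/* unlock sits above the label */
out:
	/* common cleanup that is safe whether or not we held the lock */
	return rc;
}

int main(void)
{
	printf("%d %d\n", process(1), process(0));
	return 0;
}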
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5a..069b64e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
- int points;
+ long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
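
Widening points from int to long matters because the badness heuristic works in
units of pages: with tens of gigabytes of RAM, scaled page counts no longer fit
in a 32-bit int. A small standalone demonstration of the truncation
(illustrative numbers only, not the kernel's formula):

#include <stdio.h>

int main(void)
{
	/* number of 4 KiB pages in 64 GiB of RAM */
	long long totalpages = (64LL << 30) / 4096;	/* 16777216 */

	/* badness-style scaling: pages * 1000 easily exceeds INT_MAX */
	long long points = (totalpages / 2) * 1000;	/* 8388608000 */

	printf("as long: %lld  squeezed into int: %d\n", points, (int)points);
	return 0;
}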
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7125248..50f0824 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
*
* Returns @bdi's dirty limit in pages. The term "dirty" in the context of
* dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() only takes it seriously as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages
+ * under control; for example, when the device is completely stalled due to
+ * some error condition, or when 1000 dd tasks are writing to a slow 10MB/s
+ * USB key. In other, normal situations it acts more gently, throttling the
+ * tasks more (rather than completely blocking them) when the bdi dirty
+ * pages go high.
*
* It allocates high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
*/
if (unlikely(bdi_thresh > thresh))
bdi_thresh = thresh;
+ /*
+ * It's very possible that bdi_thresh is close to 0 not because the
+ * device is slow, but because it has remained inactive for a long time.
+ * Honour such devices with a reasonably good (hopefully IO efficient)
+ * threshold, so that occasional writes won't be blocked and active
+ * writes can ramp up the threshold quickly.
+ */
bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
/*
* scale global setpoint to bdi's:
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
*
* 8 serves as the safety ratio.
*/
- if (bdi_dirty)
- t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+ t = min(t, bdi_dirty * HZ / (8 * bw + 1));
/*
* The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@ pause:
if (task_ratelimit)
break;
+ /*
+ * When an NFS server is unresponsive and the NFS dirty pages
+ * exceed dirty_thresh, give the other good bdi's a pipe to go
+ * through, so that tasks on them still remain responsive.
+ *
+ * In theory 1 page is enough to keep the consumer-producer
+ * pipe going: the flusher cleans 1 page => the task dirties 1
+ * more page. However, bdi_dirty has accounting errors, so use
+ * the larger and more IO-friendly bdi_stat_error.
+ */
+ if (bdi_dirty <= bdi_stat_error(bdi))
+ break;
+
if (fatal_signal_pending(current))
break;
}
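
The bdi_max_pause() hunk drops the if (bdi_dirty) guard: the +1 in the divisor
already makes the expression safe at bw == 0, and bdi_dirty == 0 simply clamps
the pause to zero, which is the desired behaviour anyway. A quick arithmetic
check of the clamp (HZ and the helper name are stand-ins, not the kernel's):

#include <stdio.h>

#define HZ 1000

/* mirrors t = min(t, bdi_dirty * HZ / (8 * bw + 1)) */
static unsigned long clamp_pause(unsigned long t, unsigned long bdi_dirty,
				 unsigned long bw /* pages per second */)
{
	unsigned long limit = bdi_dirty * HZ / (8 * bw + 1);

	return t < limit ? t : limit;
}

int main(void)
{
	printf("%lu\n", clamp_pause(200, 1024, 256));	/* plenty dirty: keep t */
	printf("%lu\n", clamp_pause(200, 4, 256));	/* few dirty pages: shrink */
	printf("%lu\n", clamp_pause(200, 0, 0));	/* zero dirty: pause 0, no div-by-zero */
	return 0;
}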
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d..2b8ba3a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
-
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
unsigned long block_migratetype;
int reserve;
- /* Get the start pfn, end pfn and the number of blocks to reserve */
+ /*
+ * Get the start pfn, end pfn and the number of blocks to reserve.
+ * We have to be careful to stay aligned to pageblock_nr_pages to
+ * make sure that we always check pfn_valid for the first page in
+ * the block.
+ */
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
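
The added roundup() guarantees the reserve-block loop starts on a pageblock
boundary, so pfn_valid() is always checked against the first page of a block
even when zone_start_pfn itself is unaligned. The rounding arithmetic, checked
in isolation (the pageblock size is an assumed example value):

#include <stdio.h>

/* same shape as the kernel's roundup() for a positive step */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long pageblock_nr_pages = 512;	/* e.g. 2 MiB blocks of 4 KiB pages */
	unsigned long zone_start_pfn = 0x100a;	/* deliberately unaligned */

	unsigned long start_pfn = ROUNDUP(zone_start_pfn, pageblock_nr_pages);

	printf("start_pfn: 0x%lx -> 0x%lx\n", zone_start_pfn, start_pfn);
	return 0;
}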
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a..716eb4a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
- return page_to_phys(vmalloc_to_page(addr));
+ return page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
} else
- return page_to_phys(pcpu_addr_to_page(addr));
+ return page_to_phys(pcpu_addr_to_page(addr)) +
+ offset_in_page(addr);
}
/**
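
per_cpu_ptr_to_phys() previously returned the physical address of the page,
losing the byte offset within it; adding offset_in_page(addr), the low
PAGE_SIZE-1 bits, restores a byte-accurate translation. The offset arithmetic,
shown standalone (PAGE_SIZE assumed 4 KiB here):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* like the kernel's offset_in_page(): low bits of the address within its page */
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
	uintptr_t addr = 0x1234f673UL;
	uintptr_t page_start = addr & PAGE_MASK;

	/* page_to_phys() alone maps everything to the page frame's base;
	 * the fix adds back the in-page offset */
	printf("page: 0x%lx  offset: 0x%lx  exact: 0x%lx\n",
	       (unsigned long)page_start,
	       (unsigned long)offset_in_page(addr),
	       (unsigned long)(page_start + offset_in_page(addr)));
	return 0;
}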
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3231bf3..27be2f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1290,7 +1290,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
goto fail;
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ if (!addr)
+ return NULL;
/*
* In this function, newly allocated vm_struct is not added
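
Dropping static from the va local is the significant fix here: a static local
is a single shared slot, so concurrent or reentrant calls would clobber each
other's vmap_area pointer, while an automatic variable lives on each caller's
stack. A toy illustration of why a static local cannot hold per-call state
(names hypothetical):

#include <stdio.h>

/* BROKEN: one shared slot for every call */
static int *pick_broken(int *a, int *b, int use_a)
{
	static int *slot;	/* shared across all calls and threads */

	slot = use_a ? a : b;	/* another thread may overwrite this... */
	return slot;		/* ...before we return it */
}

/* FIXED: per-call storage on the stack */
static int *pick_fixed(int *a, int *b, int use_a)
{
	int *slot = use_a ? a : b;	/* private to this invocation */

	return slot;
}

int main(void)
{
	int x = 1, y = 2;

	printf("%d %d\n", *pick_broken(&x, &y, 1), *pick_fixed(&x, &y, 0));
	return 0;
}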
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c0..f54a05b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
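
The shrinker batch counter moves from an open-coded cmpxchg() loop to
atomic_long_xchg()/atomic_long_add_return(), which express "take the whole
pending count" and "give back the remainder" as single operations. The
equivalence, sketched with C11 atomics (field and function names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch;

/* old style: CAS loop to claim the pending count */
static long take_cas(void)
{
	long nr;

	do {
		nr = atomic_load(&nr_in_batch);
	} while (!atomic_compare_exchange_weak(&nr_in_batch, &nr, 0));
	return nr;
}

/* new style: a single unconditional exchange does the same claim */
static long take_xchg(void)
{
	return atomic_exchange(&nr_in_batch, 0);
}

int main(void)
{
	long nr, leftover, new_nr;

	atomic_store(&nr_in_batch, 128);
	nr = take_xchg();		/* claim all queued work */
	leftover = nr - 100;		/* pretend we scanned 100 of it */
	/* like atomic_long_add_return(): fetch-add, then compute the new value */
	new_nr = atomic_fetch_add(&nr_in_batch, leftover) + leftover;
	printf("claimed %ld, returned %ld, now %ld (cas variant: %ld)\n",
	       nr, leftover, new_nr, take_cas());
	return 0;
}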