-rw-r--r-- | fs/buffer.c            | 2
-rw-r--r-- | fs/mpage.c             | 2
-rw-r--r-- | include/linux/mmzone.h | 2
-rw-r--r-- | kernel/sched_cpupri.c  | 2
4 files changed, 4 insertions, 4 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa5302..1d920ba 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2893,7 +2893,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 
 	/*
 	 * The page straddles i_size. It must be zeroed out on each and every
-	 * writepage invokation because it may be mmapped. "A file is mapped
+	 * writepage invocation because it may be mmapped. "A file is mapped
 	 * in multiples of the page size. For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
diff --git a/fs/mpage.c b/fs/mpage.c
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -561,7 +561,7 @@ page_is_mapped:
 	if (page->index >= end_index) {
 		/*
 		 * The page straddles i_size. It must be zeroed out on each
-		 * and every writepage invokation because it may be mmapped.
+		 * and every writepage invocation because it may be mmapped.
 		 * "A file is mapped in multiples of the page size. For a file
 		 * that is not a multiple of the page size, the remaining memory
 		 * is zeroed when mapped, and writes to that region are not
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668..e60a340 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -349,7 +349,7 @@ struct zone {
 	 * prev_priority holds the scanning priority for this zone. It is
 	 * defined as the scanning priority at which we achieved our reclaim
 	 * target at the previous try_to_free_pages() or balance_pgdat()
-	 * invokation.
+	 * invocation.
 	 *
 	 * We use prev_priority as a measure of how much stress page reclaim is
 	 * under - it drives the swappiness decision: whether to unmap mapped
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 597b330..3db4b1a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -58,7 +58,7 @@ static int convert_prio(int prio)
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
- * current invokation. By the time the call returns, the CPUs may have in
+ * current invocation. By the time the call returns, the CPUs may have in
  * fact changed priorities any number of times. While not ideal, it is not
  * an issue of correctness since the normal rebalancer logic will correct
  * any discrepancies created by racing against the uncertainty of the current