author     Hugh Dickins <hugh@veritas.com>          2005-10-29 18:16:41 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-29 21:40:42 -0700
commit     b8072f099b7829a6ff3eba618e1d079a81f753f8
tree       80bf801b68ecf5f29a61f0f4fd5976b4daa91c6a
parent     f412ac08c9861b4791af0145934c22f1458686da
[PATCH] mm: update comments to pte lock
Updated several references to page_table_lock in common code comments.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
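
Background (illustrative, not part of the patch): the "pte lock" named in the updated comments is the spinlock returned by pte_offset_map_lock() — a per-page-table lock when split ptlocks are configured, mm->page_table_lock otherwise. The sketch below shows the lock/unlock pairing the reworded comments assume; walk_one_pte() and its arguments are hypothetical helpers, not kernel code.

	/*
	 * Illustrative sketch only: take the pte lock around a single pte,
	 * inspect it, then drop the lock.  walk_one_pte() is hypothetical.
	 */
	#include <linux/mm.h>

	static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, address, &ptl);	/* map + lock */
		if (!pte_present(*pte)) {
			pte_unmap_unlock(pte, ptl);			/* unlock + unmap */
			return 0;
		}
		/* the pte may be inspected or updated while the pte lock is held */
		pte_unmap_unlock(pte, ptl);
		return 1;
	}
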
 include/asm-generic/pgtable.h |  2 +-
 include/linux/mempolicy.h     |  3 +--
 mm/filemap.c                  |  6 +++---
 mm/rmap.c                     | 10 +++++-----
 mm/swap_state.c               |  3 +--
 5 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ff28c8b..7dca30a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -8,7 +8,7 @@
  *  - update the page tables
  *  - inform the TLB about the new one
  *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ * We hold the mm semaphore for reading, and the pte lock.
  *
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 38e60a0..7af8cb8 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -47,8 +47,7 @@ struct vm_area_struct;
  * Locking policy for interlave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem. For allocating in the interleave policy the page_table_lock
- * must be also aquired to protect il_next.
+ * mmap_sem.
  *
  * Freeing policy:
  * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
diff --git a/mm/filemap.c b/mm/filemap.c
index f560b41..036599d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -66,7 +66,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock	(various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
@@ -86,9 +86,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->anon_vma.lock	(vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock	(anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  *
- *  ->page_table_lock
+ *  ->page_table_lock or pte_lock
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -32,7 +32,7 @@
  *   page->flags PG_locked (lock_page)
  *     mapping->i_mmap_lock
  *       anon_vma->lock
- *         mm->page_table_lock
+ *         mm->page_table_lock or pte_lock
  *           zone->lru_lock (in mark_page_accessed)
  *           swap_lock (in swap_duplicate, swap_info_get)
  *             mmlist_lock (in mmput, drain_mmlist and others)
@@ -244,7 +244,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 /*
  * Check that @page is mapped at @address into @mm.
  *
- * On success returns with mapped pte and locked mm->page_table_lock.
+ * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 			  unsigned long address, spinlock_t **ptlp)
@@ -445,7 +445,7 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
@@ -468,7 +468,7 @@ void page_add_anon_rmap(struct page *page,
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_file_rmap(struct page *page)
 {
@@ -483,7 +483,7 @@ void page_add_file_rmap(struct page *page)
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
  *
- * Caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_remove_rmap(struct page *page)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index cafc1ed..dfd9a46 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -259,8 +259,7 @@ static inline void free_swap_cache(struct page *page)

 /*
  * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
  */
 void free_page_and_swap_cache(struct page *page)
 {
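
For the rmap.c hunks above, the practical effect of "On success returns with pte mapped and locked" is that callers receive the pte lock through the ptlp argument and must drop it with pte_unmap_unlock(). The sketch below is a hypothetical caller written against the signature shown in the hunk; page_pte_is_dirty() is illustrative, not a kernel function, and it assumes page_check_address() is visible via linux/rmap.h.

	/*
	 * Hypothetical caller sketch: mirrors how rmap.c users of
	 * page_check_address() hold and release the pte lock returned
	 * through *ptlp.
	 */
	#include <linux/mm.h>
	#include <linux/rmap.h>

	static int page_pte_is_dirty(struct page *page, struct mm_struct *mm,
				     unsigned long address)
	{
		spinlock_t *ptl;
		pte_t *pte;
		int dirty = 0;

		/* on success this returns with the pte mapped and the pte lock held */
		pte = page_check_address(page, mm, address, &ptl);
		if (!pte)
			return 0;

		if (pte_dirty(*pte))
			dirty = 1;

		pte_unmap_unlock(pte, ptl);	/* drop the pte lock, unmap the pte */
		return dirty;
	}
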