Diffstat (limited to 'arch/sh64')
-rw-r--r--   arch/sh64/kernel/ptrace.c   |   2
-rw-r--r--   arch/sh64/kernel/time.c     |   3
-rw-r--r--   arch/sh64/mm/cache.c        |  68
-rw-r--r--   arch/sh64/mm/hugetlbpage.c  | 188
-rw-r--r--   arch/sh64/mm/ioremap.c      |   4
5 files changed, 45 insertions, 220 deletions
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index fd20009..71f2eec 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -121,7 +121,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
         return 0;
 }
 
-asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 {
         struct task_struct *child;
         extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index f4a62a1..870fe53 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -116,8 +116,6 @@
 
 extern unsigned long wall_jiffies;
 
-u64 jiffies_64 = INITIAL_JIFFIES;
-
 static unsigned long tmu_base, rtc_base;
 unsigned long cprc_base;
 
@@ -253,6 +251,7 @@ int do_settimeofday(struct timespec *tv)
 
         return 0;
 }
+EXPORT_SYMBOL(do_settimeofday);
 
 static int set_rtc_time(unsigned long nowtime)
 {
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
index 3b87e25..c0c1b21 100644
--- a/arch/sh64/mm/cache.c
+++ b/arch/sh64/mm/cache.c
@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
         }
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+                                unsigned long addr, unsigned long end)
 {
         pgd_t *pgd;
         pmd_t *pmd;
         pte_t *pte;
         pte_t entry;
+        spinlock_t *ptl;
         unsigned long paddr;
 
-        /* NOTE : all the callers of this have mm->page_table_lock held, so the
-           following page table traversal is safe even on SMP/pre-emptible. */
-
-        if (!mm) return; /* No way to find physical address of page */
-        pgd = pgd_offset(mm, eaddr);
-        if (pgd_bad(*pgd)) return;
-
-        pmd = pmd_offset(pgd, eaddr);
-        if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-        pte = pte_offset_kernel(pmd, eaddr);
-        entry = *pte;
-        if (pte_none(entry) || !pte_present(entry)) return;
-
-        paddr = pte_val(entry) & PAGE_MASK;
-
-        sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+        if (!mm)
+                return; /* No way to find physical address of page */
+
+        pgd = pgd_offset(mm, addr);
+        if (pgd_bad(*pgd))
+                return;
+
+        pmd = pmd_offset(pgd, addr);
+        if (pmd_none(*pmd) || pmd_bad(*pmd))
+                return;
+
+        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+        do {
+                entry = *pte;
+                if (pte_none(entry) || !pte_present(entry))
+                        continue;
+                paddr = pte_val(entry) & PAGE_MASK;
+                sh64_dcache_purge_coloured_phy_page(paddr, addr);
+        } while (pte++, addr += PAGE_SIZE, addr != end);
+        pte_unmap_unlock(pte - 1, ptl);
 }
 
 /****************************************************************************/
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
         int n_pages;
 
         n_pages = ((end - start) >> PAGE_SHIFT);
-        if (n_pages >= 64) {
+        if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
                 sh64_dcache_purge_all();
 #else
@@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
         }
 #endif
         } else {
-                /* 'Small' range */
-                unsigned long aligned_start;
-                unsigned long eaddr;
-                unsigned long last_page_start;
-
-                aligned_start = start & PAGE_MASK;
-                /* 'end' is 1 byte beyond the end of the range */
-                last_page_start = (end - 1) & PAGE_MASK;
-
-                eaddr = aligned_start;
-                while (eaddr <= last_page_start) {
-                        sh64_dcache_purge_user_page(mm, eaddr);
-                        eaddr += PAGE_SIZE;
-                }
+                /* Small range, covered by a single page table page */
+                start &= PAGE_MASK;     /* should already be so */
+                end = PAGE_ALIGN(end);  /* should already be so */
+                sh64_dcache_purge_user_pages(mm, start, end);
         }
         return;
 }
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
            addresses from the user address space specified by mm, after writing
            back any dirty data.
 
-           Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-           Note(2), this is called with mm->page_table_lock held.*/
+           Note, 'end' is 1 byte beyond the end of the range to flush. */
 
         sh64_dcache_purge_user_range(mm, start, end);
         sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
            the I-cache must be searched too in case the page in question is
            both writable and being executed from (e.g. stack trampolines.)
 
-           Note(1), this is called with mm->page_table_lock held.
+           Note, this is called with pte lock held.
         */
 
         sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
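The `(start ^ (end - 1)) & PMD_MASK` test added in sh64_dcache_purge_user_range() above sends any range not covered by a single page-table page down the purge-all path, so that sh64_dcache_purge_user_pages() can walk the rest under one pte_offset_map_lock()/pte_unmap_unlock() pair. A minimal user-space sketch of that boundary test, assuming 4 KiB pages and 2 MiB of coverage per pmd entry purely for illustration (the real values come from the sh64 headers):

#include <stdio.h>

#define PMD_SHIFT 21                            /* assumed: 2 MiB per pmd entry */
#define PMD_MASK  (~((1UL << PMD_SHIFT) - 1))

/* Non-zero when [start, end) spans more than one page-table page:
 * start and end - 1 then differ in at least one bit above PMD_SHIFT. */
static int crosses_pmd(unsigned long start, unsigned long end)
{
        return ((start ^ (end - 1)) & PMD_MASK) != 0;
}

int main(void)
{
        printf("%d\n", crosses_pmd(0x200000, 0x201000));        /* 0: same pmd */
        printf("%d\n", crosses_pmd(0x3ff000, 0x401000));        /* 1: crosses  */
        return 0;
}

Since 'end' is one byte beyond the range, end - 1 is the last byte actually covered, which is why the test uses it rather than end itself.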
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index dcd9c8a..ed6a505 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -54,41 +54,31 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
         return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-                         struct page *page, pte_t * page_table, int write_access)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t entry)
 {
-        unsigned long i;
-        pte_t entry;
-
-        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-
-        if (write_access)
-                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-                                                       vma->vm_page_prot)));
-        else
-                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-        entry = pte_mkyoung(entry);
-        mk_pte_huge(entry);
+        int i;
 
         for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                set_pte(page_table, entry);
-                page_table++;
-
+                set_pte_at(mm, addr, ptep, entry);
+                ptep++;
+                addr += PAGE_SIZE;
                 pte_val(entry) += PAGE_SIZE;
         }
 }
 
-pte_t huge_ptep_get_and_clear(pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep)
 {
         pte_t entry;
+        int i;
 
         entry = *ptep;
 
         for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                pte_clear(pte);
-                pte++;
+                pte_clear(mm, addr, ptep);
+                addr += PAGE_SIZE;
+                ptep++;
         }
 
         return entry;
@@ -106,79 +96,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
         return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-                            struct vm_area_struct *vma)
-{
-        pte_t *src_pte, *dst_pte, entry;
-        struct page *ptepage;
-        unsigned long addr = vma->vm_start;
-        unsigned long end = vma->vm_end;
-        int i;
-
-        while (addr < end) {
-                dst_pte = huge_pte_alloc(dst, addr);
-                if (!dst_pte)
-                        goto nomem;
-                src_pte = huge_pte_offset(src, addr);
-                BUG_ON(!src_pte || pte_none(*src_pte));
-                entry = *src_pte;
-                ptepage = pte_page(entry);
-                get_page(ptepage);
-                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                        set_pte(dst_pte, entry);
-                        pte_val(entry) += PAGE_SIZE;
-                        dst_pte++;
-                }
-                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-                addr += HPAGE_SIZE;
-        }
-        return 0;
-
-nomem:
-        return -ENOMEM;
-}
-
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                        struct page **pages, struct vm_area_struct **vmas,
-                        unsigned long *position, int *length, int i)
-{
-        unsigned long vaddr = *position;
-        int remainder = *length;
-
-        WARN_ON(!is_vm_hugetlb_page(vma));
-
-        while (vaddr < vma->vm_end && remainder) {
-                if (pages) {
-                        pte_t *pte;
-                        struct page *page;
-
-                        pte = huge_pte_offset(mm, vaddr);
-
-                        /* hugetlb should be locked, and hence, prefaulted */
-                        BUG_ON(!pte || pte_none(*pte));
-
-                        page = pte_page(*pte);
-
-                        WARN_ON(!PageCompound(page));
-
-                        get_page(page);
-                        pages[i] = page;
-                }
-
-                if (vmas)
-                        vmas[i] = vma;
-
-                vaddr += PAGE_SIZE;
-                --remainder;
-                ++i;
-        }
-
-        *length = remainder;
-        *position = vaddr;
-
-        return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
                               unsigned long address, int write)
 {
@@ -195,84 +112,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
         return NULL;
 }
-
-void unmap_hugepage_range(struct vm_area_struct *vma,
-                          unsigned long start, unsigned long end)
-{
-        struct mm_struct *mm = vma->vm_mm;
-        unsigned long address;
-        pte_t *pte;
-        struct page *page;
-        int i;
-
-        BUG_ON(start & (HPAGE_SIZE - 1));
-        BUG_ON(end & (HPAGE_SIZE - 1));
-
-        for (address = start; address < end; address += HPAGE_SIZE) {
-                pte = huge_pte_offset(mm, address);
-                BUG_ON(!pte);
-                if (pte_none(*pte))
-                        continue;
-                page = pte_page(*pte);
-                put_page(page);
-                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                        pte_clear(mm, address+(i*PAGE_SIZE), pte);
-                        pte++;
-                }
-        }
-        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-        flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-        struct mm_struct *mm = current->mm;
-        unsigned long addr;
-        int ret = 0;
-
-        BUG_ON(vma->vm_start & ~HPAGE_MASK);
-        BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-        spin_lock(&mm->page_table_lock);
-        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-                unsigned long idx;
-                pte_t *pte = huge_pte_alloc(mm, addr);
-                struct page *page;
-
-                if (!pte) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                if (!pte_none(*pte))
-                        continue;
-
-                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-                page = find_get_page(mapping, idx);
-                if (!page) {
-                        /* charge the fs quota first */
-                        if (hugetlb_get_quota(mapping)) {
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        page = alloc_huge_page();
-                        if (!page) {
-                                hugetlb_put_quota(mapping);
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-                        if (! ret) {
-                                unlock_page(page);
-                        } else {
-                                hugetlb_put_quota(mapping);
-                                free_huge_page(page);
-                                goto out;
-                        }
-                }
-                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-        }
-out:
-        spin_unlock(&mm->page_table_lock);
-        return ret;
-}
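In the new set_huge_pte_at() above, a single huge mapping is fanned out over (1 << HUGETLB_PAGE_ORDER) consecutive base-page PTEs whose values step by PAGE_SIZE; huge_ptep_get_and_clear() clears the same run, which is why both now take the mm and addr that set_pte_at()/pte_clear() require. A user-space sketch of the fan-out arithmetic, with made-up constants (4 KiB base pages, order 9), since the real sizes come from the sh64 headers:

#include <stdio.h>

#define PAGE_SIZE          4096UL       /* assumed for illustration */
#define HUGETLB_PAGE_ORDER 9            /* assumed: 512 base pages */

int main(void)
{
        unsigned long entry = 0x40000000UL;     /* pretend pte_val() of the huge page */
        unsigned long addr  = 0x20000000UL;     /* pretend mapped virtual address */
        int i;

        /* Same shape as the loop in set_huge_pte_at(): each successive
         * base PTE maps the next PAGE_SIZE chunk of the huge page. */
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                if (i < 3 || i == (1 << HUGETLB_PAGE_ORDER) - 1)
                        printf("pte[%3d]: addr=%#010lx val=%#010lx\n",
                               i, addr, entry);
                addr  += PAGE_SIZE;
                entry += PAGE_SIZE;
        }
        return 0;
}

copy_hugetlb_page_range(), follow_hugetlb_page(), unmap_hugepage_range() and hugetlb_prefault() disappear here because generic mm code provides them in this kernel generation, built on exactly these two per-architecture hooks.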
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
index f4003da..fb1866f 100644
--- a/arch/sh64/mm/ioremap.c
+++ b/arch/sh64/mm/ioremap.c
@@ -79,7 +79,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
                 BUG();
 
         do {
-                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+                pte_t * pte = pte_alloc_kernel(pmd, address);
                 if (!pte)
                         return -ENOMEM;
                 remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -101,7 +101,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
         flush_cache_all();
         if (address >= end)
                 BUG();
-        spin_lock(&init_mm.page_table_lock);
         do {
                 pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                 error = -ENOMEM;
@@ -115,7 +114,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         } while (address && (address < end));
-        spin_unlock(&init_mm.page_table_lock);
         flush_tlb_all();
         return 0;
 }
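The dropped init_mm.page_table_lock pair in remap_area_pages(), and the shorter pte_alloc_kernel() signature, track a locking change in this generation of the tree: the kernel page-table allocators take page_table_lock themselves, just around installing a newly allocated table, re-checking the slot in case a racing allocator got there first, so ioremap no longer holds the lock across the whole walk. A user-space sketch of that allocate-then-recheck pattern; the names here are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static void *pmd_slot;                  /* stands in for an empty pmd entry */

/* Allocate outside the lock; only installation is serialized. */
static void *slot_alloc(void)
{
        void *new = calloc(1, 4096);

        if (!new)
                return NULL;
        pthread_mutex_lock(&ptl);
        if (!pmd_slot)
                pmd_slot = new;         /* we won: install our table */
        else
                free(new);              /* a racing caller beat us: discard */
        pthread_mutex_unlock(&ptl);
        return pmd_slot;
}

int main(void)
{
        printf("%p\n%p\n", slot_alloc(), slot_alloc());  /* same pointer twice */
        return 0;
}

Either caller ends up using the same installed table; the loser of the race simply frees its spare copy.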