author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-10-22 12:52:47 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-10-22 12:52:49 +0200
commit		3610cce87af0693603db171d5b6f6735f5e3dc5b (patch)
tree		9aa7d9a0924b2f075c1b95ed57bb63ed512165c9 /include/asm-s390
parent		e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (diff)
download	op-kernel-dev-3610cce87af0693603db171d5b6f6735f5e3dc5b.zip
		op-kernel-dev-3610cce87af0693603db171d5b6f6735f5e3dc5b.tar.gz
[S390] Cleanup page table definitions.
- De-confuse the defines for the address-space-control-elements
and the segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
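For orientation before the diff: the reworked update_mm() in the mmu_context.h hunk below reduces the user address-space-control element (ASCE) to "table origin | table length | user bits (| region-third type on 64-bit)", and reuses the same bits for the shadow (no-exec) table with a different origin. A minimal, userspace-compilable sketch of that calculation, using the macro values from the new pgtable.h defines; pgd_origin is a made-up stand-in for __pa(pgd):

/*
 * Sketch of the ASCE calculation done by update_mm() in the
 * mmu_context.h hunk below.  Macro values match the new 64-bit
 * pgtable.h defines; pgd_origin is a fake physical address.
 */
#include <stdio.h>

#define _ASCE_SPACE_SWITCH	0x40UL	/* space switch event */
#define _ASCE_PRIVATE_SPACE	0x100UL	/* private space control */
#define _ASCE_ALT_EVENT		0x80UL	/* storage alteration event control */
#define _ASCE_TYPE_REGION3	0x04UL	/* region third table type */
#define _ASCE_TABLE_LENGTH	0x03UL	/* region table length */

#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

int main(void)
{
	unsigned long pgd_origin = 0x12340000UL;	/* stand-in for __pa(pgd) */
	unsigned long asce = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;

	asce |= _ASCE_TYPE_REGION3;	/* 64-bit: the pgd is a region-third table */
	asce |= pgd_origin;		/* table origin, page aligned */

	printf("user_asce = %#lx\n", asce);	/* prints 0x123401c7 */
	return 0;
}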
Diffstat (limited to 'include/asm-s390')
-rw-r--r--	include/asm-s390/mmu_context.h	|  50
-rw-r--r--	include/asm-s390/pgalloc.h	| 213
-rw-r--r--	include/asm-s390/pgtable.h	| 219
-rw-r--r--	include/asm-s390/processor.h	|   6
-rw-r--r--	include/asm-s390/tlbflush.h	|   2
5 files changed, 191 insertions, 299 deletions
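The "simplify get_shadow_xxx" part of the cleanup boils the shadow lookup down to one offset calculation: the shadow table's address is kept in the page descriptor (page->index) and the entry offset is simply re-applied to it, as get_shadow_table() in the pgtable.h hunk below does. A small sketch modelling this in plain C; shadow_origin stands in for page->index and the addresses are invented for illustration:

/*
 * Model of the simplified shadow-table lookup (get_shadow_table() in the
 * pgtable.h hunk below).  The kernel stores the shadow table's address in
 * page->index; shadow_origin plays that role here, and all addresses are
 * invented for the example.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PxD_SHADOW_SHIFT	2	/* 64-bit: a crst table spans 4 pages */

static unsigned long shadow_origin = 0x20000000UL;	/* page->index stand-in */

static unsigned long get_shadow_table(unsigned long entry_addr)
{
	/* Offset of the entry within its (multi-page) table ... */
	unsigned long offset = entry_addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);

	/* ... re-applied to the shadow table, if one is attached. */
	return shadow_origin ? (shadow_origin | offset) : 0UL;
}

int main(void)
{
	unsigned long table = 0x10000000UL;	/* primary table origin */
	unsigned long entry = table + 0x7f8;	/* some entry inside it */

	printf("shadow entry at %#lx\n", get_shadow_table(entry));	/* 0x200007f8 */
	return 0;
}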
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b..05b8421 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@

 #ifndef __s390x__
 #define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
 #else
 #define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
 #endif

-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                  struct task_struct *tsk)
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
+	pgd_t *pgd = mm->pgd;
+	unsigned long asce_bits;
+
+	/* Calculate asce bits from the first pgd table entry. */
+	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+	asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+	S390_lowcore.user_asce = asce_bits | __pa(pgd);
+	if (switch_amode) {
+		/* Load primary space page table origin. */
+		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+		asm volatile(LCTL_OPCODE" 1,1,%0\n"
+			     : : "m" (S390_lowcore.user_exec_asce) );
+	} else
+		/* Load home space page table origin. */
+		asm volatile(LCTL_OPCODE" 13,13,%0"
+			     : : "m" (S390_lowcore.user_asce) );
 }

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-	if (prev != next) {
-		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-					 PGTABLE_BITS;
-		if (shadow_pgd) {
-			/* Load primary/secondary space page table origin. */
-			S390_lowcore.user_exec_asce =
-				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-			asm volatile(LCTL_OPCODE" 1,1,%0\n"
-				     LCTL_OPCODE" 7,7,%1"
-				     : : "m" (S390_lowcore.user_exec_asce),
-					 "m" (S390_lowcore.user_asce) );
-		} else if (switch_amode) {
-			/* Load primary space page table origin. */
-			asm volatile(LCTL_OPCODE" 1,1,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-		} else
-			/* Load home space page table origin. */
-			asm volatile(LCTL_OPCODE" 13,13,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-	}
+	if (unlikely(prev == next))
+		return;
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	update_mm(next, tsk);
 }

+#define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)

 static inline void activate_mm(struct mm_struct *prev,
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 6cbbfe4..229b0bd 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,114 +19,75 @@

 #define check_pgt_cache()	do {} while (0)

-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);

-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);

-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		"	mvc	4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }

-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }

 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
+#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+
+#define pgd_populate(mm, pmd, pte)	BUG()
 #define pgd_populate_kernel(mm, pmd, pte)	BUG()
+
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }

-static inline void pmd_free (pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)

-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
 }

 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);

 	if (shadow_pgd && shadow_pmd)
 		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
@@ -135,17 +96,26 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)

 #endif /* __s390x__ */

+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }

@@ -153,7 +123,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);

 	pmd_populate_kernel(mm, pmd, pte);
@@ -164,57 +134,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))

 #endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index b424ab2..f9f59a8 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -35,9 +35,6 @@
 #include <asm/bug.h>
 #include <asm/processor.h>

-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
-struct mm_struct;
-
 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
 extern void paging_init(void);
 extern void vmem_map_init(void);
@@ -221,6 +218,8 @@ extern unsigned long vmalloc_end;
 /* Hardware bits in the page table entry */
 #define _PAGE_RO	0x200		/* HW read-only bit  */
 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
+
+/* Software bits in the page table entry */
 #define _PAGE_SWT	0x001		/* SW pte type bit t */
 #define _PAGE_SWX	0x002		/* SW pte type bit x */
@@ -264,60 +263,75 @@ extern unsigned long vmalloc_end;

 #ifndef __s390x__

-/* Bits in the segment table entry */
-#define _PAGE_TABLE_LEN 0xf            /* only full page-tables            */
-#define _PAGE_TABLE_COM 0x10           /* common page-table                */
-#define _PAGE_TABLE_INV 0x20           /* invalid page-table               */
-#define _SEG_PRESENT    0x001          /* Software (overlap with PTL)      */
-
-/* Bits int the storage key */
-#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
-#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */
-
-#define _USER_SEG_TABLE_LEN    0x7f    /* user-segment-table up to 2 GB    */
-#define _KERNEL_SEG_TABLE_LEN  0x7f    /* kernel-segment-table up to 2 GB  */
-
-/*
- * User and Kernel pagetables are identical
- */
-#define _PAGE_TABLE	_PAGE_TABLE_LEN
-#define _KERNPG_TABLE	_PAGE_TABLE_LEN
-
-/*
- * The Kernel segment-tables includes the User segment-table
- */
+/* Bits in the segment table address-space-control-element */
+#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
+#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
+#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
+#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
+#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

-#define _SEGMENT_TABLE	(_USER_SEG_TABLE_LEN|0x80000000|0x100)
-#define _KERNSEG_TABLE	_KERNEL_SEG_TABLE_LEN
+/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
+#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
+#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
+#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

-#define USER_STD_MASK	0x00000080UL
+#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

 #else /* __s390x__ */

+/* Bits in the segment/region table address-space-control-element */
+#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
+#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
+#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
+#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
+#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
+#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
+#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
+#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
+#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
+#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
+#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
+
+/* Bits in the region table entry */
+#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
+#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
+#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
+#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
+#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
+#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
+#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */
+
+#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
+#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
+#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+
 /* Bits in the segment table entry */
-#define _PMD_ENTRY_INV	0x20		/* invalid segment table entry	    */
-#define _PMD_ENTRY	0x00
+#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
+#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
+#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

-/* Bits in the region third table entry */
-#define _PGD_ENTRY_INV	0x20		/* invalid region table entry	    */
-#define _PGD_ENTRY	0x07
+#define _SEGMENT_ENTRY		(0)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+
+#endif /* __s390x__ */

 /*
- * User and kernel page directory
+ * A user page table pointer has the space-switch-event bit, the
+ * private-space-control bit and the storage-alteration-event-control
+ * bit set. A kernel page table pointer doesn't need them.
  */
-#define _REGION_THIRD		0x4
-#define _REGION_THIRD_LEN	0x3
-#define _REGION_TABLE		(_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
-#define _KERN_REGION_TABLE	(_REGION_THIRD|_REGION_THIRD_LEN)
-
-#define USER_STD_MASK		0x0000000000000080UL
+#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
+				 _ASCE_ALT_EVENT)

-/* Bits in the storage key */
+/* Bits int the storage key */
 #define _PAGE_CHANGED    0x02          /* HW changed bit                   */
 #define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

-#endif /* __s390x__ */
-
 /*
  * Page protection definitions.
  */
@@ -358,65 +372,38 @@ extern unsigned long vmalloc_end;
 #define __S111	PAGE_EX_RW

 #ifndef __s390x__
-# define PMD_SHADOW_SHIFT	1
-# define PGD_SHADOW_SHIFT	1
+# define PxD_SHADOW_SHIFT	1
 #else /* __s390x__ */
-# define PMD_SHADOW_SHIFT	2
-# define PGD_SHADOW_SHIFT	2
+# define PxD_SHADOW_SHIFT	2
 #endif /* __s390x__ */

 static inline struct page *get_shadow_page(struct page *page)
 {
-	if (s390_noexec && !list_empty(&page->lru))
-		return virt_to_page(page->lru.next);
-	return NULL;
-}
-
-static inline pte_t *get_shadow_pte(pte_t *ptep)
-{
-	unsigned long pteptr = (unsigned long) (ptep);
-
-	if (s390_noexec) {
-		unsigned long offset = pteptr & (PAGE_SIZE - 1);
-		void *addr = (void *) (pteptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pte_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
+	if (s390_noexec && page->index)
+		return virt_to_page((void *)(addr_t) page->index);
 	return NULL;
 }

-static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+static inline void *get_shadow_pte(void *table)
 {
-	unsigned long pmdptr = (unsigned long) (pmdp);
+	unsigned long addr, offset;
+	struct page *page;

-	if (s390_noexec) {
-		unsigned long offset = pmdptr &
-			((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
-		void *addr = (void *) (pmdptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pmd_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
-	return NULL;
+	addr = (unsigned long) table;
+	offset = addr & (PAGE_SIZE - 1);
+	page = virt_to_page((void *)(addr ^ offset));
+	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }

-static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+static inline void *get_shadow_table(void *table)
 {
-	unsigned long pgdptr = (unsigned long) (pgdp);
+	unsigned long addr, offset;
+	struct page *page;

-	if (s390_noexec) {
-		unsigned long offset = pgdptr &
-			((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
-		void *addr = (void *) (pgdptr ^ offset);
-		struct page *page = virt_to_page(addr);
-		if (!list_empty(&page->lru))
-			return (pgd_t *) ((unsigned long) page->lru.next |
-					  offset);
-	}
-	return NULL;
+	addr = (unsigned long) table;
+	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
+	page = virt_to_page((void *)(addr ^ offset));
+	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }

 /*
@@ -448,47 +435,42 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)    { return 0; }
 static inline int pgd_bad(pgd_t pgd)     { return 0; }

-static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
-static inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
-static inline int pmd_bad(pmd_t pmd)
-{
-	return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
-}
-
 #else /* __s390x__ */

 static inline int pgd_present(pgd_t pgd)
 {
-	return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
+	return pgd_val(pgd) & _REGION_ENTRY_ORIGIN;
 }

 static inline int pgd_none(pgd_t pgd)
 {
-	return pgd_val(pgd) & _PGD_ENTRY_INV;
+	return pgd_val(pgd) & _REGION_ENTRY_INV;
 }

 static inline int pgd_bad(pgd_t pgd)
 {
-	return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
+	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
+	return (pgd_val(pgd) & mask) != _REGION3_ENTRY;
 }

+#endif /* __s390x__ */
+
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
+	return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
 }

 static inline int pmd_none(pmd_t pmd)
 {
-	return pmd_val(pmd) & _PMD_ENTRY_INV;
+	return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
 }

 static inline int pmd_bad(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
+	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }

-#endif /* __s390x__ */
-
 static inline int pte_none(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -548,31 +530,22 @@ static inline void pgd_clear(pgd_t * pgdp) { }

 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
-	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
-}
-
-static inline void pmd_clear(pmd_t * pmdp)
-{
-	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
-
-	pmd_clear_kernel(pmdp);
-	if (shadow_pmd)
-		pmd_clear_kernel(shadow_pmd);
+	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
 }

 #else /* __s390x__ */

 static inline void pgd_clear_kernel(pgd_t * pgdp)
 {
-	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
+	pgd_val(*pgdp) = _REGION3_ENTRY_EMPTY;
 }

 static inline void pgd_clear(pgd_t * pgdp)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+	pgd_t *shadow_pgd = get_shadow_table(pgdp);

 	pgd_clear_kernel(pgdp);
 	if (shadow_pgd)
@@ -581,21 +554,21 @@ static inline void pgd_clear(pgd_t * pgdp)

 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
-	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }

+#endif /* __s390x__ */
+
 static inline void pmd_clear(pmd_t * pmdp)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+	pmd_t *shadow_pmd = get_shadow_table(pmdp);

 	pmd_clear_kernel(pmdp);
 	if (shadow_pmd)
 		pmd_clear_kernel(shadow_pmd);
 }

-#endif /* __s390x__ */
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t *shadow_pte = get_shadow_pte(ptep);
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 81efccc..21d40a1 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -127,12 +127,6 @@ struct stack_frame {

 #define ARCH_MIN_TASKALIGN	8

-#ifndef __s390x__
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
-#else /* __s390x__ */
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _REGION_TABLE
-#endif /* __s390x__ */
-
 #define INIT_THREAD { \
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
 }
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 3a9985f..a69bd24 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -61,7 +61,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);

 		if (shadow_pgd)
 			__tlb_flush_idte(shadow_pgd);