author		Paul Mundt <lethal@linux-sh.org>	2010-05-20 11:57:38 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2010-05-20 11:57:38 +0900
commit		ffee72d4681c8777918268a96aef42bdeb6c367b (patch)
tree		4baf91483886d561c198ed0524ab54b783273e86 /arch/mips/mm
parent		fb54d268329846aa13b2bc44a64d90e9b7131192 (diff)
parent		f72caf7e496465182eeda842ac66a5e75404ddf1 (diff)
download	op-kernel-dev-ffee72d4681c8777918268a96aef42bdeb6c367b.zip  op-kernel-dev-ffee72d4681c8777918268a96aef42bdeb6c367b.tar.gz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/cache.c	2
-rw-r--r--	arch/mips/mm/tlbex.c	150
-rw-r--r--	arch/mips/mm/uasm.c	23
3 files changed, 132 insertions, 43 deletions
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index be8627b..12af739 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 }
 
 unsigned long _page_cachable_default;
-EXPORT_SYMBOL_GPL(_page_cachable_default);
+EXPORT_SYMBOL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0de0e41..86f004d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -31,6 +31,16 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+
 static inline int r45k_bvahwbug(void)
 {
 	/* XXX: We should probe for the presence of this bug, but we don't. */
@@ -83,6 +93,7 @@ enum label_id {
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+	label_large_segbits_fault,
 #ifdef CONFIG_HUGETLB_PAGE
 	label_tlb_huge_update,
 #endif
@@ -101,6 +112,7 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
 #ifdef CONFIG_HUGETLB_PAGE
 UASM_L_LA(_tlb_huge_update)
 #endif
@@ -157,6 +169,10 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 /*
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
@@ -408,7 +424,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 	} else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
-		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
 		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 #endif
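A note on the uasm_i_dsrl() to uasm_i_dsrl_safe() conversions throughout this patch: the MIPS64 doubleword shift instructions encode only a 5-bit shift amount, so shifting by 32..63 requires the separate dsrl32/dsll32 encodings. The _safe emitters pick the right encoding from the (often config-dependent) amount at handler-build time, which is what lets the open-coded `if (PGDIR_SHIFT - 3 < 32)` selection below be dropped. A sketch of how these helpers are defined in asm/uasm.h, assuming the existing uasm_i_dsrl()/uasm_i_dsrl32() and uasm_i_dsll()/uasm_i_dsll32() emitters:

	static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
					    unsigned int a2, unsigned int a3)
	{
		/* Amounts 0..31 fit dsrl; 32..63 need the dsrl32 form. */
		if (a3 < 32)
			uasm_i_dsrl(p, a1, a2, a3);
		else
			uasm_i_dsrl32(p, a1, a2, a3 - 32);
	}

	static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
					    unsigned int a2, unsigned int a3)
	{
		if (a3 < 32)
			uasm_i_dsll(p, a1, a2, a3);
		else
			uasm_i_dsll32(p, a1, a2, a3 - 32);
	}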
@@ -532,7 +548,24 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-	uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+	if (check_for_high_segbits) {
+		/*
+		 * The kernel currently implicitely assumes that the
+		 * MIPS SEGBITS parameter for the processor is
+		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+		 * allocate virtual addresses outside the maximum
+		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+		 * that doesn't prevent user code from accessing the
+		 * higher xuseg addresses.  Here, we make sure that
+		 * everything but the lower xuseg addresses goes down
+		 * the module_alloc/vmalloc path.
+		 */
+		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, ptr, label_vmalloc);
+	} else {
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -549,14 +582,14 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * SMTC uses TCBind value as "CPU" index
 	 */
 	uasm_i_mfc0(p, ptr, C0_TCBIND);
-	uasm_i_dsrl(p, ptr, ptr, 19);
+	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-	uasm_i_dsrl(p, ptr, ptr, 23);
+	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_daddu(p, ptr, ptr, tmp);
@@ -569,44 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc_done(l, *p);
-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
+
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 #ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
 
+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
 	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
 	uasm_l_vmalloc(l, *p);
 
-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+		 *
+		 * Ignoring xsseg (assume disabled so would generate
+		 * (address errors?), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+		 * addresses would have taken an address error. We try
+		 * to mimic that here by taking a load/istream page
+		 * fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
+	}
 }
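The boundary tested above is worth a worked example. A pgd occupies 2^(PAGE_SHIFT + PGD_ORDER) bytes, i.e. 2^(PAGE_SHIFT + PGD_ORDER - 3) eight-byte entries, each covering 2^PGDIR_SHIFT bytes of user space, so the page tables only back xuseg addresses below 2^(PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3). On CPUs whose SEGBITS exceeds that (cpu_data.vmbits, tested in build_tlb_refill_handler() below), user code can still present higher xuseg addresses, and shifting BADVADDR right by the covered bit count yields nonzero exactly for those. A minimal userspace model of the check, with assumed constants (4K pages, three-level tables, PGD_ORDER 0; real values are config-dependent):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PGDIR_SHIFT	30	/* assumed: 4K pages, three levels */
	#define PGD_ORDER	0	/* assumed for the example */

	int main(void)
	{
		int covered = PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3; /* 39 here */
		uint64_t addrs[] = { (1ULL << covered) - 1, 1ULL << covered };
		int i;

		for (i = 0; i < 2; i++) {
			/* mirrors: dsrl_safe ptr, badvaddr, covered; bnez ptr, vmalloc */
			const char *path = (addrs[i] >> covered) ?
				"vmalloc/fault path" : "normal pgd walk";
			printf("%#llx: %s\n", (unsigned long long)addrs[i], path);
		}
		return 0;
	}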
 
@@ -720,9 +787,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 		} else {
-			uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
 		}
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
@@ -788,10 +855,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	 * create the plain linear handler
 	 */
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -820,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif
 
 	/*
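The reworked bcm1250_m3_war() sequence above compares BADVADDR against ENTRYHI while ignoring bits below VPN2 (bit 13 with 4K pages, hence the 12 + 1) and everything between SEGBITS (hard-coded to 44 here) and the region-select bits 63:62, which the old `srl PAGE_SHIFT + 1` could not do once high xuseg addresses are in play. A C model of the mask arithmetic, for illustration only and not part of the patch:

	#include <assert.h>
	#include <stdint.h>

	/*
	 * Model of the generated comparison: a nonzero result means
	 * BADVADDR and ENTRYHI disagree in the region bits (63:62) or in
	 * the VPN2 bits below SEGBITS, so the handler takes label_leave.
	 */
	static uint64_t m3_war_mismatch(uint64_t badvaddr, uint64_t entryhi)
	{
		const unsigned int segbits = 44;	/* as hard-coded in the patch */
		uint64_t x = badvaddr ^ entryhi;
		uint64_t region = x >> 62;		/* dsrl_safe K1, K0, 62    */
		uint64_t vpn2 = (x >> (12 + 1))		/* dsrl_safe K0, K0, 13    */
			      << (64 + 12 + 1 - segbits); /* dsll by 33 drops >= 44 */
		return vpn2 | region;			/* or K0, K0, K1           */
	}

	int main(void)
	{
		/* Differences inside the page-offset/ASID bits are ignored... */
		assert(m3_war_mismatch(0x1000, 0x10ff) == 0);
		/* ...but a VPN2 difference below SEGBITS is caught. */
		assert(m3_war_mismatch(0x20000, 0x40000) != 0);
		return 0;
	}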
@@ -930,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -1297,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1312,10 +1375,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -1516,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
 	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000:
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 1581e98..611d564 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -31,7 +31,8 @@ enum fields {
 	BIMM = 0x040,
 	JIMM = 0x080,
 	FUNC = 0x100,
-	SET = 0x200
+	SET = 0x200,
+	SCIMM = 0x400
 };
 
 #define OP_MASK		0x3f
@@ -52,6 +53,8 @@ enum fields {
 #define FUNC_SH		0
 #define SET_MASK	0x7
 #define SET_SH		0
+#define SCIMM_MASK	0xfffff
+#define SCIMM_SH	6
 
 enum opcode {
 	insn_invalid,
@@ -61,10 +64,10 @@ enum opcode {
 	insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
 	insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
 	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
-	insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+	insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
 	insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
 	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
-	insn_dins
+	insn_dins, insn_syscall
 };
 
 struct insn {
@@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
 	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+	{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
 	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
 	{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
 	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
 	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
 	{ insn_invalid, 0, 0 }
 };
 
@@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
 	return (arg >> 2) & JIMM_MASK;
 }
 
+static inline __cpuinit u32 build_scimm(u32 arg)
+{
+	if (arg & ~SCIMM_MASK)
+		printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg & SCIMM_MASK) << SCIMM_SH;
+}
+
 static inline __cpuinit u32 build_func(u32 arg)
 {
 	if (arg & ~FUNC_MASK)
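build_scimm() above handles the new SCIMM field: the 20-bit code of a syscall (or break-class) instruction sits at bits 25:6, between the spec_op major opcode and the 6-bit function field. A standalone sketch of the resulting word (the opcode values come from the MIPS ISA encoding tables, not from this patch):

	#include <assert.h>
	#include <stdint.h>

	#define SCIMM_MASK	0xfffff
	#define SCIMM_SH	6
	#define SYSCALL_FUNC	0x0c	/* spec_op major opcode is 0 */

	static uint32_t encode_syscall(uint32_t code)
	{
		/* matches M(spec_op, 0, 0, 0, 0, syscall_op) | build_scimm(code) */
		return ((code & SCIMM_MASK) << SCIMM_SH) | SYSCALL_FUNC;
	}

	int main(void)
	{
		assert(encode_syscall(0) == 0x0000000cu);	/* plain "syscall" */
		assert(encode_syscall(0x42) == ((0x42u << 6) | 0xcu));
		return 0;
	}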
@@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
 		op |= build_func(va_arg(ap, u32));
 	if (ip->fields & SET)
 		op |= build_set(va_arg(ap, u32));
+	if (ip->fields & SCIMM)
+		op |= build_scimm(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
@@ -373,6 +388,7 @@ I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
+I_u3u1u2(_or)
 I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
@@ -391,6 +407,7 @@ I_0(_tlbwr)
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 I_u2u1msbu3(_dins);
+I_u1(_syscall);
 
 /* Handle labels. */
 void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
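For reference, the two added emitter declarations expand via the I_...() wrappers into uasm_i_or(&p, rd, rs, rt) (destination register first, as with uasm_i_xor) and uasm_i_syscall(&p, code). A hypothetical fragment showing their use, with the buffer and register numbers invented for the example:

	#include <asm/uasm.h>	/* kernel build context assumed */

	/* Hypothetical: emit "or k0, k0, k1; syscall 0" into a handler buffer. */
	static void __cpuinit emit_example(u32 *buf)
	{
		u32 *p = buf;

		uasm_i_or(&p, 26, 26, 27);	/* $26 (k0) |= $27 (k1) */
		uasm_i_syscall(&p, 0);		/* syscall, code field 0 */
	}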