author		Chris Zankel <czankel@tensilica.com>	2006-12-10 02:18:48 -0800
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-10 09:55:39 -0800
commit		173d6681380aa1d60dfc35ed7178bd7811ba2784 (patch)
tree		9d6d4d2c6dd791499ebab558647efb67ac88ae3a /arch/xtensa/mm
parent		fd43fe19b830d6cd0eba08a6c6a5f71a6bd9c1b0 (diff)
[PATCH] xtensa: remove extra header files
The Xtensa port contained many header files that were never needed. This
rather lengthy patch removes all those files. Unfortunately, there were many
dependencies that needed to be updated, so this patch touches quite a few
source files.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--	arch/xtensa/mm/fault.c	|  10
-rw-r--r--	arch/xtensa/mm/init.c	|   6
-rw-r--r--	arch/xtensa/mm/misc.S	| 265
-rw-r--r--	arch/xtensa/mm/tlb.c	| 445
4 files changed, 80 insertions, 646 deletions
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index dd0dbec..3dc6f2f 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,7 +21,7 @@
#include <asm/system.h>
#include <asm/pgalloc.h>
-unsigned long asid_cache = ASID_FIRST_VERSION;
+unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
/*
@@ -58,10 +58,10 @@ void do_page_fault(struct pt_regs *regs)
return;
}
- is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
- is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
- exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
- exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
+ exccause == EXCCAUSE_ITLB_MISS ||
+ exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 660ef05..e1ec2d1 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -141,8 +141,8 @@ void __init bootmem_init(void)
if (min_low_pfn > max_pfn)
panic("No memory found!\n");
- max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
- max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;
+ max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
+ max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
/* Find an area to use for the bootmem bitmap. */
@@ -215,7 +215,7 @@ void __init init_mmu (void)
/* Set rasid register to a known value. */
- set_rasid_register (ASID_ALL_RESERVED);
+ set_rasid_register (ASID_USER_FIRST);
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
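
One subtlety in the init.c hunk: the shift operator binds tighter than the
comparison, so the clamp on max_low_pfn parses as in this fully
parenthesized, purely illustrative equivalent:

	/* Same expression as in bootmem_init(), with the implicit C operator
	 * precedence spelled out: max_low_pfn never exceeds
	 * MAX_MEM_PFN >> PAGE_SHIFT. */
	max_low_pfn = (max_pfn < (MAX_MEM_PFN >> PAGE_SHIFT))
			? max_pfn
			: (MAX_MEM_PFN >> PAGE_SHIFT);
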
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 327c0f1..ae08533 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -19,9 +19,8 @@
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-
-#include <xtensa/cacheasm.h>
-#include <xtensa/cacheattrasm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
/* clear_page (page) */
@@ -74,104 +73,66 @@ ENTRY(copy_page)
retw
-
/*
- * void __flush_invalidate_cache_all(void)
+ * void __invalidate_icache_page(ulong start)
*/
-ENTRY(__flush_invalidate_cache_all)
+ENTRY(__invalidate_icache_page)
entry sp, 16
- dcache_writeback_inv_all a2, a3
- icache_invalidate_all a2, a3
- retw
-/*
- * void __invalidate_icache_all(void)
- */
+ ___invalidate_icache_page a2 a3
+ isync
-ENTRY(__invalidate_icache_all)
- entry sp, 16
- icache_invalidate_all a2, a3
retw
/*
- * void __flush_invalidate_dcache_all(void)
+ * void __invalidate_dcache_page(ulong start)
*/
-ENTRY(__flush_invalidate_dcache_all)
+ENTRY(__invalidate_dcache_page)
entry sp, 16
- dcache_writeback_inv_all a2, a3
- retw
-
-/*
- * void __flush_invalidate_cache_range(ulong start, ulong size)
- */
+ ___invalidate_dcache_page a2 a3
+ dsync
-ENTRY(__flush_invalidate_cache_range)
- entry sp, 16
- mov a4, a2
- mov a5, a3
- dcache_writeback_inv_region a4, a5, a6
- icache_invalidate_region a2, a3, a4
retw
/*
- * void __invalidate_icache_page(ulong start)
+ * void __flush_invalidate_dcache_page(ulong start)
*/
-ENTRY(__invalidate_icache_page)
+ENTRY(__flush_invalidate_dcache_page)
entry sp, 16
- movi a3, PAGE_SIZE
- icache_invalidate_region a2, a3, a4
- retw
-/*
- * void __invalidate_dcache_page(ulong start)
- */
+ ___flush_invalidate_dcache_page a2 a3
-ENTRY(__invalidate_dcache_page)
- entry sp, 16
- movi a3, PAGE_SIZE
- dcache_invalidate_region a2, a3, a4
+ dsync
retw
/*
- * void __invalidate_icache_range(ulong start, ulong size)
+ * void __flush_dcache_page(ulong start)
*/
-ENTRY(__invalidate_icache_range)
+ENTRY(__flush_dcache_page)
entry sp, 16
- icache_invalidate_region a2, a3, a4
- retw
-/*
- * void __invalidate_dcache_range(ulong start, ulong size)
- */
+ ___flush_dcache_page a2 a3
-ENTRY(__invalidate_dcache_range)
- entry sp, 16
- dcache_invalidate_region a2, a3, a4
+ dsync
retw
-/*
- * void __flush_dcache_page(ulong start)
- */
-ENTRY(__flush_dcache_page)
- entry sp, 16
- movi a3, PAGE_SIZE
- dcache_writeback_region a2, a3, a4
- retw
/*
- * void __flush_invalidate_dcache_page(ulong start)
+ * void __invalidate_icache_range(ulong start, ulong size)
*/
-ENTRY(__flush_invalidate_dcache_page)
+ENTRY(__invalidate_icache_range)
entry sp, 16
- movi a3, PAGE_SIZE
- dcache_writeback_inv_region a2, a3, a4
+
+ ___invalidate_icache_range a2 a3 a4
+ isync
+
retw
/*
@@ -180,195 +141,69 @@ ENTRY(__flush_invalidate_dcache_page)
ENTRY(__flush_invalidate_dcache_range)
entry sp, 16
- dcache_writeback_inv_region a2, a3, a4
- retw
-/*
- * void __invalidate_dcache_all(void)
- */
+ ___flush_invalidate_dcache_range a2 a3 a4
+ dsync
-ENTRY(__invalidate_dcache_all)
- entry sp, 16
- dcache_invalidate_all a2, a3
retw
/*
- * void __flush_invalidate_dcache_page_phys(ulong start)
+ * void _flush_dcache_range(ulong start, ulong size)
*/
-ENTRY(__flush_invalidate_dcache_page_phys)
+ENTRY(__flush_dcache_range)
entry sp, 16
- movi a3, XCHAL_DCACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
-
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
-
- ldct a6, a3
+ ___flush_dcache_range a2 a3 a4
dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
- retw
-2: diwbi a3, 0
- bgeui a3, 2, 1b
retw
-ENTRY(check_dcache_low0)
- entry sp, 16
-
- movi a3, XCHAL_DCACHE_SIZE / 4
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
-
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
-
- ldct a6, a3
- dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
- retw
-
-2: j 2b
-
-ENTRY(check_dcache_high0)
- entry sp, 16
-
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE / 2
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
-
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
-
- ldct a6, a3
- dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
- retw
-
-2: j 2b
+/*
+ * void _invalidate_dcache_range(ulong start, ulong size)
+ */
-ENTRY(check_dcache_low1)
+ENTRY(__invalidate_dcache_range)
entry sp, 16
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE * 3 / 4
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ ___invalidate_dcache_range a2 a3 a4
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
- ldct a6, a3
- dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
retw
-2: j 2b
+/*
+ * void _invalidate_icache_all(void)
+ */
-ENTRY(check_dcache_high1)
+ENTRY(__invalidate_icache_all)
entry sp, 16
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
-
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
+ ___invalidate_icache_all a2 a3
+ isync
- ldct a6, a3
- dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
retw
-2: j 2b
-
-
/*
- * void __invalidate_icache_page_phys(ulong start)
+ * void _flush_invalidate_dcache_all(void)
*/
-ENTRY(__invalidate_icache_page_phys)
+ENTRY(__flush_invalidate_dcache_all)
entry sp, 16
- movi a3, XCHAL_ICACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
-
-1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
-
- lict a6, a3
- isync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
- retw
+ ___flush_invalidate_dcache_all a2 a3
+ dsync
-2: iii a3, 0
- bgeui a3, 2, 1b
retw
+/*
+ * void _invalidate_dcache_all(void)
+ */
-#if 0
-
- movi a3, XCHAL_DCACHE_WAYS - 1
- movi a4, PAGE_SIZE
-
-1: mov a5, a2
- add a6, a2, a4
-
-2: diwbi a5, 0
- diwbi a5, XCHAL_DCACHE_LINESIZE
- diwbi a5, XCHAL_DCACHE_LINESIZE * 2
- diwbi a5, XCHAL_DCACHE_LINESIZE * 3
-
- addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
- blt a5, a6, 2b
-
- addi a3, a3, -1
- addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
- bgez a3, 1b
-
- retw
-
-ENTRY(__invalidate_icache_page_index)
+ENTRY(__invalidate_dcache_all)
entry sp, 16
- movi a3, XCHAL_ICACHE_WAYS - 1
- movi a4, PAGE_SIZE
-
-1: mov a5, a2
- add a6, a2, a4
-
-2: iii a5, 0
- iii a5, XCHAL_ICACHE_LINESIZE
- iii a5, XCHAL_ICACHE_LINESIZE * 2
- iii a5, XCHAL_ICACHE_LINESIZE * 3
-
- addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
- blt a5, a6, 2b
-
- addi a3, a3, -1
- addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
- bgez a3, 2b
+ ___invalidate_dcache_all a2 a3
+ dsync
retw
-#endif
-
-
-
-
-
-
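
The comments in misc.S document the C-callable signatures of the cache
routines the patch keeps. A prototype sketch inferred from those comments
(illustrative only; where the real declarations live is not shown by this
patch):

	/* Inferred from the routine comments in misc.S above. */
	extern void __invalidate_icache_page(unsigned long start);
	extern void __invalidate_dcache_page(unsigned long start);
	extern void __flush_invalidate_dcache_page(unsigned long start);
	extern void __flush_dcache_page(unsigned long start);
	extern void __invalidate_icache_range(unsigned long start, unsigned long size);
	extern void __flush_invalidate_dcache_range(unsigned long start, unsigned long size);
	extern void __flush_dcache_range(unsigned long start, unsigned long size);
	extern void __invalidate_dcache_range(unsigned long start, unsigned long size);
	extern void __invalidate_icache_all(void);
	extern void __flush_invalidate_dcache_all(void);
	extern void __invalidate_dcache_all(void);
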
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 0fefb866..239461d 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -24,12 +24,12 @@
static inline void __flush_itlb_all (void)
{
- int way, index;
+ int w, i;
- for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
- for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
- int entry = way + (index << PAGE_SHIFT);
- invalidate_itlb_entry_no_isync (entry);
+ for (w = 0; w < ITLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_itlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
@@ -37,12 +37,12 @@ static inline void __flush_itlb_all (void)
static inline void __flush_dtlb_all (void)
{
- int way, index;
+ int w, i;
- for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
- for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
- int entry = way + (index << PAGE_SHIFT);
- invalidate_dtlb_entry_no_isync (entry);
+ for (w = 0; w < DTLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_dtlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
@@ -63,21 +63,25 @@ void flush_tlb_all (void)
void flush_tlb_mm(struct mm_struct *mm)
{
-#if 0
- printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
-#endif
-
if (mm == current->active_mm) {
int flags;
local_save_flags(flags);
- get_new_mmu_context(mm, asid_cache);
- set_rasid_register(ASID_INSERT(mm->context));
+ __get_new_mmu_context(mm);
+ __load_mmu_context(mm);
local_irq_restore(flags);
}
else
mm->context = 0;
}
+#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
+#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
+#if _ITLB_ENTRIES > _DTLB_ENTRIES
+# define _TLB_ENTRIES _ITLB_ENTRIES
+#else
+# define _TLB_ENTRIES _DTLB_ENTRIES
+#endif
+
void flush_tlb_range (struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -93,7 +97,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
#endif
local_save_flags(flags);
- if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
+ if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register();
set_rasid_register (ASID_INSERT(mm->context));
start &= PAGE_MASK;
@@ -111,9 +115,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
set_rasid_register(oldpid);
} else {
- get_new_mmu_context(mm, asid_cache);
- if (mm == current->active_mm)
- set_rasid_register(ASID_INSERT(mm->context));
+ flush_tlb_mm(mm);
}
local_irq_restore(flags);
}
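
Taken together, the tlb.c hunks implement a simple size heuristic: if the
range spans no more pages than the auto-refill TLB holds (_TLB_ENTRIES),
invalidate the mappings page by page under the mm's ASID; otherwise fall back
to flush_tlb_mm(), which just hands the mm a fresh ASID. A condensed,
illustrative sketch follows; the per-page invalidate helpers are assumptions,
since the hunk's context does not show the loop body:

	/* Condensed sketch of the flush_tlb_range() decision. */
	if (end - start + (PAGE_SIZE - 1) <= (_TLB_ENTRIES << PAGE_SHIFT)) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context));
		for (start &= PAGE_MASK; start < end; start += PAGE_SIZE) {
			invalidate_itlb_mapping(start);	/* assumed helper */
			invalidate_dtlb_mapping(start);	/* assumed helper */
		}
		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);	/* too big: just get a new ASID */
	}
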
@@ -123,10 +125,6 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
struct mm_struct* mm = vma->vm_mm;
unsigned long flags;
int oldpid;
-#if 0
- printk("[tlbpage<%02lx,%08lx>]\n",
- (unsigned long)mm->context, page);
-#endif
if(mm->context == NO_CONTEXT)
return;
@@ -142,404 +140,5 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
set_rasid_register(oldpid);
local_irq_restore(flags);
-
-#if 0
- flush_tlb_all();
- return;
-#endif
-}
-
-
-#ifdef DEBUG_TLB
-
-#define USE_ITLB 0
-#define USE_DTLB 1
-
-struct way_config_t {
- int indicies;
- int indicies_log2;
- int pgsz_log2;
- int arf;
-};
-
-static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
-{
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
- }
-};
-
-static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
-{
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
- }
-};
-
-/* Total number of entries: */
-#define ITLB_TOTAL_ENTRIES \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
-#define DTLB_TOTAL_ENTRIES \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
-
-
-typedef struct {
- unsigned va;
- unsigned pa;
- unsigned char asid;
- unsigned char ca;
- unsigned char way;
- unsigned char index;
- unsigned char pgsz_log2; /* 0 .. 32 */
- unsigned char type; /* 0=ITLB 1=DTLB */
-} tlb_dump_entry_t;
-
-/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
-int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
-{
- if (a->asid < b->asid) return -1;
- if (a->asid > b->asid) return 1;
- if (a->va < b->va) return -1;
- if (a->va > b->va) return 1;
- if (a->pa < b->pa) return -1;
- if (a->pa > b->pa) return 1;
- if (a->ca < b->ca) return -1;
- if (a->ca > b->ca) return 1;
- if (a->way < b->way) return -1;
- if (a->way > b->way) return 1;
- if (a->index < b->index) return -1;
- if (a->index > b->index) return 1;
- return 0;
-}
-
-void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
-{
- int i, j;
- /* Simple O(n*n) sort: */
- for (i = 0; i < n-1; i++)
- for (j = i+1; j < n; j++)
- if (cmp_tlb_dump_info(t+i, t+j) > 0) {
- tlb_dump_entry_t tmp = t[i];
- t[i] = t[j];
- t[j] = tmp;
- }
-}
-
-
-static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
-static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
-
-
-static inline char *way_type (int type)
-{
- return type ? "autorefill" : "non-autorefill";
-}
-
-void print_entry (struct way_config_t *way_info,
- unsigned int way,
- unsigned int index,
- unsigned int virtual,
- unsigned int translation)
-{
- char valid_chr;
- unsigned int va, pa, asid, ca;
-
- va = virtual &
- ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
- asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
- pa = translation & ~((1 << way_info->pgsz_log2) - 1);
- ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
- valid_chr = asid ? 'V' : 'I';
-
- /* Compute and incorporate the effect of the index bits on the
- * va. It's more useful for kernel debugging, since we always
- * want to know the effective va anyway. */
-
- va += index << way_info->pgsz_log2;
-
- printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
- way, index, valid_chr, va, pa, asid, ca);
-}
-
-void print_itlb_entry (struct way_config_t *way_info, int way, int index)
-{
- print_entry (way_info, way, index,
- read_itlb_virtual (way + (index << way_info->pgsz_log2)),
- read_itlb_translation (way + (index << way_info->pgsz_log2)));
-}
-
-void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
-{
- print_entry (way_info, way, index,
- read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
- read_dtlb_translation (way + (index << way_info->pgsz_log2)));
-}
-
-void dump_itlb (void)
-{
- int way, index;
-
- printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
-
- for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, itlb[way].indicies,
- itlb[way].pgsz_log2, way_type(itlb[way].arf));
- for (index = 0; index < itlb[way].indicies; index++) {
- print_itlb_entry(&itlb[way], way, index);
- }
- }
-}
-
-void dump_dtlb (void)
-{
- int way, index;
-
- printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
-
- for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, dtlb[way].indicies,
- dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
- for (index = 0; index < dtlb[way].indicies; index++) {
- print_dtlb_entry(&dtlb[way], way, index);
- }
- }
-}
-
-void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
- int entries, int ways, int type, int show_invalid)
-{
- tlb_dump_entry_t *e = tinfo;
- int way, i;
-
- /* Gather all info: */
- for (way = 0; way < ways; way++) {
- struct way_config_t *cfg = config + way;
- for (i = 0; i < cfg->indicies; i++) {
- unsigned wayindex = way + (i << cfg->pgsz_log2);
- unsigned vv = (type ? read_dtlb_virtual (wayindex)
- : read_itlb_virtual (wayindex));
- unsigned pp = (type ? read_dtlb_translation (wayindex)
- : read_itlb_translation (wayindex));
-
- /* Compute and incorporate the effect of the index bits on the
- * va. It's more useful for kernel debugging, since we always
- * want to know the effective va anyway. */
-
- e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
- e->va += (i << cfg->pgsz_log2);
- e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
- e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
- e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
- e->way = way;
- e->index = i;
- e->pgsz_log2 = cfg->pgsz_log2;
- e->type = type;
- e++;
- }
- }
-#if 1
- /* Sort by ASID and VADDR: */
- sort_tlb_dump_info (tinfo, entries);
-#endif
-
- /* Display all sorted info: */
- printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
- for (e = tinfo, i = 0; i < entries; i++, e++) {
-#if 0
- if (e->asid == 0 && !show_invalid)
- continue;
-#endif
- printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
- (e->type ? 'D' : 'I'), e->way, e->index,
- e->asid, e->va, e->pa, e->ca,
- (1 << (e->pgsz_log2 % 10)),
- " kMG"[e->pgsz_log2 / 10]
- );
- }
-}
-
-void dump_tlbs2 (int showinv)
-{
- dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
- dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
-}
-
-void dump_all_tlbs (void)
-{
- dump_tlbs2 (1);
-}
-
-void dump_valid_tlbs (void)
-{
- dump_tlbs2 (0);
}
-
-void dump_tlbs (void)
-{
- dump_itlb();
- dump_dtlb();
-}
-
-void dump_cache_tag(int dcache, int idx)
-{
- int w, i, s, e;
- unsigned long tag, index;
- unsigned long num_lines, num_ways, cache_size, line_size;
-
- num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
- cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
- line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
-
- num_lines = cache_size / num_ways;
-
- s = 0; e = num_lines;
-
- if (idx >= 0)
- e = (s = idx * line_size) + 1;
-
- for (i = s; i < e; i+= line_size) {
- printk("\nline %#08x:", i);
- for (w = 0; w < num_ways; w++) {
- index = w * num_lines + i;
- if (dcache)
- __asm__ __volatile__("ldct %0, %1\n\t"
- : "=a"(tag) : "a"(index));
- else
- __asm__ __volatile__("lict %0, %1\n\t"
- : "=a"(tag) : "a"(index));
-
- printk(" %#010lx", tag);
- }
- }
- printk ("\n");
-}
-
-void dump_icache(int index)
-{
- unsigned long data, addr;
- int w, i;
-
- const unsigned long num_ways = XCHAL_ICACHE_WAYS;
- const unsigned long cache_size = XCHAL_ICACHE_SIZE;
- const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
- const unsigned long num_lines = cache_size / num_ways / line_size;
-
- for (w = 0; w < num_ways; w++) {
- printk ("\nWay %d", w);
-
- for (i = 0; i < line_size; i+= 4) {
- addr = w * num_lines + index * line_size + i;
- __asm__ __volatile__("licw %0, %1\n\t"
- : "=a"(data) : "a"(addr));
- printk(" %#010lx", data);
- }
- }
- printk ("\n");
-}
-
-void dump_cache_tags(void)
-{
- printk("Instruction cache\n");
- dump_cache_tag(0, -1);
- printk("Data cache\n");
- dump_cache_tag(1, -1);
-}
-
-#endif