Diffstat (limited to 'include/asm-sparc64/mmu.h')
-rw-r--r--	include/asm-sparc64/mmu.h	36
1 file changed, 22 insertions, 14 deletions
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 8627eed..230ba67 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,20 +4,9 @@
 #include <linux/config.h>
 #include <asm/page.h>
 #include <asm/const.h>
+#include <asm/hypervisor.h>
 
-/*
- * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
- * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically
- * for vpte offset calculation). For other pagesizes, this optimization in
- * the tlbhandlers can not be done; but still, all 13 bits can not be used
- * because the tlb handlers use "andcc" instruction which sign extends 13
- * bit arguments.
- */
-#if PAGE_SHIFT == 13
-#define CTX_NR_BITS		10
-#else
-#define CTX_NR_BITS		12
-#endif
+#define CTX_NR_BITS		13
 
 #define TAG_CONTEXT_BITS	((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
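
The hunk above drops the PAGE_SHIFT-dependent context width and fixes CTX_NR_BITS at the full 13 hardware bits, with TAG_CONTEXT_BITS derived as the matching low-bit mask. A minimal standalone sketch of that masking is below (plain userspace C, with 1UL standing in for the kernel's _AC() macro and a made-up tag value):

/*
 * Standalone sketch, not kernel code: shows how TAG_CONTEXT_BITS masks
 * the low CTX_NR_BITS bits out of a tag word.  The constants mirror the
 * header above; the sample tag value is made up for illustration.
 */
#include <stdio.h>

#define CTX_NR_BITS		13
#define TAG_CONTEXT_BITS	((1UL << CTX_NR_BITS) - 1UL)	/* 0x1fff */

int main(void)
{
	unsigned long tag = 0xfffff80000001234UL;	/* hypothetical tag word */
	unsigned long ctx = tag & TAG_CONTEXT_BITS;	/* low 13 bits = context */

	printf("context = 0x%lx (mask 0x%lx)\n", ctx, TAG_CONTEXT_BITS);
	return 0;
}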
@@ -90,8 +79,27 @@
 #ifndef __ASSEMBLY__
 
+#define TSB_ENTRY_ALIGNMENT	16
+
+struct tsb {
+	unsigned long tag;
+	unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
+extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+extern void tsb_flush(unsigned long ent, unsigned long tag);
+extern void tsb_init(struct tsb *tsb, unsigned long size);
+
 typedef struct {
-	unsigned long	sparc64_ctx_val;
+	spinlock_t		lock;
+	unsigned long		sparc64_ctx_val;
+	struct tsb		*tsb;
+	unsigned long		tsb_rss_limit;
+	unsigned long		tsb_nentries;
+	unsigned long		tsb_reg_val;
+	unsigned long		tsb_map_vaddr;
+	unsigned long		tsb_map_pte;
+	struct hv_tsb_descr	tsb_descr;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
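
The second hunk introduces the 16-byte-aligned struct tsb (one tag/PTE pair per entry) and widens mm_context_t to carry per-address-space TSB state. Below is a rough userspace sketch of what a tsb_init()-style pass over such a table could look like; the entry count and the ~0UL "invalid" tag marker are illustrative assumptions, not taken from this header (the real tsb_init() lives elsewhere in the sparc64 tree):

/*
 * Rough userspace sketch: initialize a TSB-like table of aligned
 * tag/pte pairs by marking every entry invalid.  The table size and
 * the ~0UL invalid-tag value are assumptions for illustration only.
 */
#define TSB_ENTRY_ALIGNMENT	16

struct tsb {
	unsigned long tag;
	unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

static struct tsb tsb_table[512];	/* arbitrary power-of-two size */

static void sketch_tsb_init(struct tsb *tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < nentries; i++) {
		tsb[i].tag = ~0UL;	/* assumed "no translation" marker */
		tsb[i].pte = 0UL;
	}
}

int main(void)
{
	sketch_tsb_init(tsb_table, sizeof(tsb_table) / sizeof(tsb_table[0]));
	return 0;
}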