author     David Gibson <david@gibson.dropbear.id.au>   2005-12-14 16:08:40 +1100
committer  Paul Mackerras <paulus@samba.org>            2006-01-09 15:05:47 +1100
commit     14c89e7fc84ae55354b8bf12fee1b6d14f259c8a (patch)
tree       83d6bbd44499d81e927bbe743f1a212ff5d30b51
parent     56c8eaee65d688b526c12dca54a30276335679e5 (diff)
[PATCH] powerpc: Replace VMALLOCBASE with VMALLOC_START
On ppc64, we independently define VMALLOCBASE and VMALLOC_START to be
the same thing: the start of the vmalloc() area at 0xd000000000000000.
VMALLOC_START is used much more widely, including in generic code, so
this patch gets rid of the extraneous VMALLOCBASE.
This does require moving the region-ID definitions from page_64.h to
pgtable.h, but they belong no more clearly in the former than in the
latter anyway. While we're moving them, clean up the REGION_ID
definitions:
- Abolish REGION_SIZE; it was used only once, to define REGION_MASK.
- Define the specific region IDs in terms of the REGION_ID() macro.
- Define KERNEL_REGION_ID in terms of PAGE_OFFSET rather than
KERNELBASE. It amounts to the same thing, but conceptually this is
about the region of the linear mapping (which starts at PAGE_OFFSET)
rather than of the kernel text itself (which is at KERNELBASE).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
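
For illustration only (not part of the patch), here is a minimal user-space
sketch of the region-ID arithmetic that the new pgtable.h definitions express.
VMALLOC_START is the 0xd000000000000000 quoted above; PAGE_OFFSET is assumed
to be the usual ppc64 linear-mapping base of 0xc000000000000000.

/*
 * Standalone sketch (not kernel code) of the ppc64 region-ID arithmetic.
 * PAGE_OFFSET below is an assumed value; VMALLOC_START is taken from the
 * commit message.
 */
#include <assert.h>
#include <stdio.h>

#define REGION_SHIFT        60UL
#define REGION_MASK         (0xfUL << REGION_SHIFT)
#define REGION_ID(ea)       (((unsigned long)(ea)) >> REGION_SHIFT)

#define PAGE_OFFSET         0xc000000000000000UL  /* linear mapping base (assumed) */
#define VMALLOC_START       0xd000000000000000UL  /* start of the vmalloc() area */

#define VMALLOC_REGION_ID   (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID    (REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID      (0UL)

int main(void)
{
        /* The simplified 0xfUL mask is bit-for-bit identical to the old
         * ((1UL << REGION_SIZE) - 1UL) << REGION_SHIFT with REGION_SIZE == 4,
         * so the cleanup changes no values. */
        assert(REGION_MASK == (((1UL << 4UL) - 1UL) << REGION_SHIFT));

        printf("kernel  region: 0x%lx\n", KERNEL_REGION_ID);        /* 0xc */
        printf("vmalloc region: 0x%lx\n", VMALLOC_REGION_ID);       /* 0xd */
        printf("user    region: 0x%lx\n", REGION_ID(0x10000000UL)); /* 0x0 == USER_REGION_ID */
        return 0;
}

The REGION_ID() macro simply takes the top 4 bits of an effective address, so
each specific region ID falls directly out of the start address of its region.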
-rw-r--r--  arch/powerpc/kernel/lparmap.c |  4
-rw-r--r--  arch/powerpc/mm/slb.c         |  6
-rw-r--r--  include/asm-powerpc/page_64.h | 10
-rw-r--r--  include/asm-powerpc/pgtable.h | 11
4 files changed, 16 insertions, 15 deletions
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 8a53d43..92d9474 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -18,8 +18,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xEsids = {
 		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
 		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
-		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
-		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+		{ .xKernelEsid = GET_ESID(VMALLOC_START),
+		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
 	},

 	.xRanges = {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cc22570..ffc8ed4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-		        "r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, 1)),
 		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -216,7 +216,7 @@ void slb_initialize(void)
 	create_slbe(PAGE_OFFSET, lflags, 0);

 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);

 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 6642c01..8a07a93 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -25,16 +25,6 @@
  */
 #define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)

-#define REGION_SIZE		4UL
-#define REGION_SHIFT		60UL
-#define REGION_MASK		(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
-#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID		(0UL)
-#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
-
 /* Segment size */
 #define SID_SHIFT		28
 #define SID_MASK		0xfffffffffUL
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 0303f57..3518adb 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -58,6 +58,17 @@ struct mm_struct;
 #define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)

 /*
+ * Region IDs
+ */
+#define REGION_SHIFT		60UL
+#define REGION_MASK		(0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
+#define USER_REGION_ID		(0UL)
+
+/*
  * Common bits in a linux-style PTE.  These match the bits in the
  * (hardware-defined) PowerPC PTE as closely as possible.  Additional
  * bits may be defined in pgtable-*.h