author     Linus Torvalds <torvalds@g5.osdl.org>  2005-08-29 17:11:29 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-08-29 17:11:29 -0700
commit     40193713df2cdb9c233b3fc2029ecdccb40cb1e4 (patch)
tree       db2ce73665b250672f5f5c0cf7544ec370c122f9 /include
parent     8f3d17fb7bcb7c255197d11469fb5e9695c9d2f4 (diff)
parent     c594adad5653491813959277fb87a2fef54c4e05 (diff)
Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/paulus/ppc64-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ppc64/abs_addr.h   |  86
-rw-r--r--  include/asm-ppc64/cputable.h   |  47
-rw-r--r--  include/asm-ppc64/firmware.h   | 101
-rw-r--r--  include/asm-ppc64/imalloc.h    |   2
-rw-r--r--  include/asm-ppc64/iommu.h      |   3
-rw-r--r--  include/asm-ppc64/lmb.h        |   1
-rw-r--r--  include/asm-ppc64/machdep.h    |   3
-rw-r--r--  include/asm-ppc64/mmu.h        |  16
-rw-r--r--  include/asm-ppc64/naca.h       |   7
-rw-r--r--  include/asm-ppc64/page.h       |  55
-rw-r--r--  include/asm-ppc64/pgalloc.h    |  93
-rw-r--r--  include/asm-ppc64/pgtable.h    |  90
-rw-r--r--  include/asm-ppc64/pmc.h        |   2
-rw-r--r--  include/asm-ppc64/processor.h  |   4
-rw-r--r--  include/asm-ppc64/prom.h       |  14
-rw-r--r--  include/asm-ppc64/system.h     |   4
-rw-r--r--  include/asm-ppc64/vio.h        |  10
17 files changed, 312 insertions, 226 deletions
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-ppc64/abs_addr.h
index 6d4e8e7..84c24d4 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-ppc64/abs_addr.h
@@ -16,93 +16,51 @@
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
+#include <asm/firmware.h>
-typedef u32 msChunks_entry;
-struct msChunks {
+struct mschunks_map {
unsigned long num_chunks;
unsigned long chunk_size;
unsigned long chunk_shift;
unsigned long chunk_mask;
- msChunks_entry *abs;
+ u32 *mapping;
};
-extern struct msChunks msChunks;
+extern struct mschunks_map mschunks_map;
-extern unsigned long msChunks_alloc(unsigned long, unsigned long, unsigned long);
-extern unsigned long reloc_offset(void);
+/* Chunks are 256 KB */
+#define MSCHUNKS_CHUNK_SHIFT (18)
+#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
+#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
-#ifdef CONFIG_MSCHUNKS
-
-static inline unsigned long
-chunk_to_addr(unsigned long chunk)
+static inline unsigned long chunk_to_addr(unsigned long chunk)
{
- unsigned long offset = reloc_offset();
- struct msChunks *_msChunks = PTRRELOC(&msChunks);
-
- return chunk << _msChunks->chunk_shift;
+ return chunk << MSCHUNKS_CHUNK_SHIFT;
}
-static inline unsigned long
-addr_to_chunk(unsigned long addr)
+static inline unsigned long addr_to_chunk(unsigned long addr)
{
- unsigned long offset = reloc_offset();
- struct msChunks *_msChunks = PTRRELOC(&msChunks);
-
- return addr >> _msChunks->chunk_shift;
+ return addr >> MSCHUNKS_CHUNK_SHIFT;
}
-static inline unsigned long
-chunk_offset(unsigned long addr)
+static inline unsigned long phys_to_abs(unsigned long pa)
{
- unsigned long offset = reloc_offset();
- struct msChunks *_msChunks = PTRRELOC(&msChunks);
+ unsigned long chunk;
- return addr & _msChunks->chunk_mask;
-}
+ /* This is a no-op on non-iSeries */
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ return pa;
-static inline unsigned long
-abs_chunk(unsigned long pchunk)
-{
- unsigned long offset = reloc_offset();
- struct msChunks *_msChunks = PTRRELOC(&msChunks);
- if ( pchunk >= _msChunks->num_chunks ) {
- return pchunk;
- }
- return PTRRELOC(_msChunks->abs)[pchunk];
-}
+ chunk = addr_to_chunk(pa);
-/* A macro so it can take pointers or unsigned long. */
-#define phys_to_abs(pa) \
- ({ unsigned long _pa = (unsigned long)(pa); \
- chunk_to_addr(abs_chunk(addr_to_chunk(_pa))) + chunk_offset(_pa); \
- })
+ if (chunk < mschunks_map.num_chunks)
+ chunk = mschunks_map.mapping[chunk];
-static inline unsigned long
-physRpn_to_absRpn(unsigned long rpn)
-{
- unsigned long pa = rpn << PAGE_SHIFT;
- unsigned long aa = phys_to_abs(pa);
- return (aa >> PAGE_SHIFT);
+ return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
}
-/* A macro so it can take pointers or unsigned long. */
-#define abs_to_phys(aa) lmb_abs_to_phys((unsigned long)(aa))
-
-#else /* !CONFIG_MSCHUNKS */
-
-#define chunk_to_addr(chunk) ((unsigned long)(chunk))
-#define addr_to_chunk(addr) (addr)
-#define chunk_offset(addr) (0)
-#define abs_chunk(pchunk) (pchunk)
-
-#define phys_to_abs(pa) (pa)
-#define physRpn_to_absRpn(rpn) (rpn)
-#define abs_to_phys(aa) (aa)
-
-#endif /* !CONFIG_MSCHUNKS */
-
/* Convenience macros */
#define virt_to_abs(va) phys_to_abs(__pa(va))
-#define abs_to_virt(aa) __va(abs_to_phys(aa))
+#define abs_to_virt(aa) __va(aa)
#endif /* _ABS_ADDR_H */
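The rewritten abs_addr.h drops the relocatable msChunks structure and the CONFIG_MSCHUNKS conditionals in favour of a fixed 256 KB chunk size plus a runtime firmware check, so the translation only does real work on iSeries. A minimal userspace sketch of the same arithmetic, with a made-up mapping table standing in for mschunks_map, shows how a physical address is split into a chunk index and an offset, remapped, and recombined:

#include <stdio.h>

#define CHUNK_SHIFT 18                      /* 256 KB chunks, as in MSCHUNKS_CHUNK_SHIFT */
#define CHUNK_SIZE  (1UL << CHUNK_SHIFT)
#define OFFSET_MASK (CHUNK_SIZE - 1)

/* Hypothetical table standing in for mschunks_map.mapping / num_chunks */
static unsigned int mapping[] = { 7, 3, 9, 1 };
static unsigned long num_chunks = 4;

static unsigned long phys_to_abs_sketch(unsigned long pa)
{
        unsigned long chunk = pa >> CHUNK_SHIFT;        /* addr_to_chunk() */

        if (chunk < num_chunks)                         /* only known chunks are remapped */
                chunk = mapping[chunk];

        /* chunk_to_addr() plus the offset within the 256 KB chunk */
        return (chunk << CHUNK_SHIFT) + (pa & OFFSET_MASK);
}

int main(void)
{
        unsigned long pa = (2UL << CHUNK_SHIFT) + 0x123;   /* chunk 2, offset 0x123 */

        printf("0x%lx -> 0x%lx\n", pa, phys_to_abs_sketch(pa));
        return 0;
}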
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
index d67fa9e..ae6cf38 100644
--- a/include/asm-ppc64/cputable.h
+++ b/include/asm-ppc64/cputable.h
@@ -56,11 +56,6 @@ struct cpu_spec {
* BHT, SPD, etc... from head.S before branching to identify_machine
*/
cpu_setup_t cpu_setup;
-
- /* This is used to identify firmware features which are available
- * to the kernel.
- */
- unsigned long firmware_features;
};
extern struct cpu_spec cpu_specs[];
@@ -71,39 +66,6 @@ static inline unsigned long cpu_has_feature(unsigned long feature)
return cur_cpu_spec->cpu_features & feature;
}
-
-/* firmware feature bitmask values */
-#define FIRMWARE_MAX_FEATURES 63
-
-#define FW_FEATURE_PFT (1UL<<0)
-#define FW_FEATURE_TCE (1UL<<1)
-#define FW_FEATURE_SPRG0 (1UL<<2)
-#define FW_FEATURE_DABR (1UL<<3)
-#define FW_FEATURE_COPY (1UL<<4)
-#define FW_FEATURE_ASR (1UL<<5)
-#define FW_FEATURE_DEBUG (1UL<<6)
-#define FW_FEATURE_TERM (1UL<<7)
-#define FW_FEATURE_PERF (1UL<<8)
-#define FW_FEATURE_DUMP (1UL<<9)
-#define FW_FEATURE_INTERRUPT (1UL<<10)
-#define FW_FEATURE_MIGRATE (1UL<<11)
-#define FW_FEATURE_PERFMON (1UL<<12)
-#define FW_FEATURE_CRQ (1UL<<13)
-#define FW_FEATURE_VIO (1UL<<14)
-#define FW_FEATURE_RDMA (1UL<<15)
-#define FW_FEATURE_LLAN (1UL<<16)
-#define FW_FEATURE_BULK (1UL<<17)
-#define FW_FEATURE_XDABR (1UL<<18)
-#define FW_FEATURE_MULTITCE (1UL<<19)
-#define FW_FEATURE_SPLPAR (1UL<<20)
-
-typedef struct {
- unsigned long val;
- char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-
#endif /* __ASSEMBLY__ */
/* CPU kernel features */
@@ -140,10 +102,8 @@ extern firmware_feature_t firmware_features_table[];
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
#define CPU_FTR_CTRL ASM_CONST(0x0000100000000000)
-/* Platform firmware features */
-#define FW_FTR_ ASM_CONST(0x0000000000000001)
-
#ifndef __ASSEMBLY__
+
#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \
PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
@@ -156,10 +116,9 @@ extern firmware_feature_t firmware_features_table[];
#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
#else
#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif
+#endif /* CONFIG_PPC_ISERIES */
-#define COMMON_PPC64_FW (0)
-#endif
+#endif /* __ASSEMBLY */
#ifdef __ASSEMBLY__
diff --git a/include/asm-ppc64/firmware.h b/include/asm-ppc64/firmware.h
new file mode 100644
index 0000000..22bb85c
--- /dev/null
+++ b/include/asm-ppc64/firmware.h
@@ -0,0 +1,101 @@
+/*
+ * include/asm-ppc64/firmware.h
+ *
+ * Extracted from include/asm-ppc64/cputable.h
+ *
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_PPC_FIRMWARE_H
+#define __ASM_PPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
+
+#define FW_FEATURE_PFT (1UL<<0)
+#define FW_FEATURE_TCE (1UL<<1)
+#define FW_FEATURE_SPRG0 (1UL<<2)
+#define FW_FEATURE_DABR (1UL<<3)
+#define FW_FEATURE_COPY (1UL<<4)
+#define FW_FEATURE_ASR (1UL<<5)
+#define FW_FEATURE_DEBUG (1UL<<6)
+#define FW_FEATURE_TERM (1UL<<7)
+#define FW_FEATURE_PERF (1UL<<8)
+#define FW_FEATURE_DUMP (1UL<<9)
+#define FW_FEATURE_INTERRUPT (1UL<<10)
+#define FW_FEATURE_MIGRATE (1UL<<11)
+#define FW_FEATURE_PERFMON (1UL<<12)
+#define FW_FEATURE_CRQ (1UL<<13)
+#define FW_FEATURE_VIO (1UL<<14)
+#define FW_FEATURE_RDMA (1UL<<15)
+#define FW_FEATURE_LLAN (1UL<<16)
+#define FW_FEATURE_BULK (1UL<<17)
+#define FW_FEATURE_XDABR (1UL<<18)
+#define FW_FEATURE_MULTITCE (1UL<<19)
+#define FW_FEATURE_SPLPAR (1UL<<20)
+#define FW_FEATURE_ISERIES (1UL<<21)
+
+enum {
+ FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+ FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+ FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+ FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+ FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+ FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+ FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
+ FW_FEATURE_SPLPAR,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+ FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
+ FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+ FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+ 0,
+ FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_ALWAYS &
+#endif
+ FW_FEATURE_POSSIBLE,
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long ppc64_firmware_features;
+
+static inline unsigned long firmware_has_feature(unsigned long feature)
+{
+ return (FW_FEATURE_ALWAYS & feature) ||
+ (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
+}
+
+#ifdef CONFIG_PPC_PSERIES
+typedef struct {
+ unsigned long val;
+ char * name;
+} firmware_feature_t;
+
+extern firmware_feature_t firmware_features_table[];
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_PPC_FIRMWARE_H */
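The point of the FW_FEATURE_POSSIBLE / FW_FEATURE_ALWAYS pair is that firmware_has_feature() can be folded to a constant wherever the kernel configuration already decides the answer, and only consults the ppc64_firmware_features word for genuinely variable features. A freestanding sketch of the same pattern, with invented feature names:

#include <stdio.h>

#define FEAT_A (1UL << 0)
#define FEAT_B (1UL << 1)
#define FEAT_C (1UL << 2)

/* Build-time knowledge: A is always present, B can never be present. */
#define FEAT_POSSIBLE (FEAT_A | FEAT_C)
#define FEAT_ALWAYS   (FEAT_A)

static unsigned long runtime_features = FEAT_C;    /* discovered at boot */

static inline unsigned long has_feature(unsigned long feature)
{
        /* Constant-folds to 1 for FEAT_A and to 0 for FEAT_B;
         * only FEAT_C actually reads runtime_features. */
        return (FEAT_ALWAYS & feature) ||
               (FEAT_POSSIBLE & runtime_features & feature);
}

int main(void)
{
        printf("A=%lu B=%lu C=%lu\n", has_feature(FEAT_A),
               has_feature(FEAT_B), has_feature(FEAT_C));
        return 0;
}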
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
index e46ff68..42adf70 100644
--- a/include/asm-ppc64/imalloc.h
+++ b/include/asm-ppc64/imalloc.h
@@ -6,7 +6,7 @@
*/
#define PHBS_IO_BASE VMALLOC_END
#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END (VMALLOC_START + EADDR_MASK)
+#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
/* imalloc region types */
diff --git a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h
index 729de5c..72dcf81 100644
--- a/include/asm-ppc64/iommu.h
+++ b/include/asm-ppc64/iommu.h
@@ -104,9 +104,6 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn);
#ifdef CONFIG_PPC_ISERIES
-/* Initializes tables for bio buses */
-extern void __init iommu_vio_init(void);
-
struct iSeries_Device_Node;
/* Creates table for an individual device node */
extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
diff --git a/include/asm-ppc64/lmb.h b/include/asm-ppc64/lmb.h
index a6cbca2..cb368bf 100644
--- a/include/asm-ppc64/lmb.h
+++ b/include/asm-ppc64/lmb.h
@@ -22,7 +22,6 @@
struct lmb_property {
unsigned long base;
- unsigned long physbase;
unsigned long size;
};
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index f0ef063..ff2c928 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -140,6 +140,9 @@ struct machdep_calls {
/* Idle loop for this platform, leave empty for default idle loop */
int (*idle_loop)(void);
+
+ /* Function to enable pmcs for this platform, called once per cpu. */
+ void (*enable_pmcs)(void);
};
extern int default_idle(void);
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 70348a8..ad36bb2 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -28,9 +28,12 @@
#define STE_VSID_SHIFT 12
/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x9
+#define STAB0_PAGE 0x6
#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
-#define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* ! __ASSEMBLY */
/*
* SLB
@@ -259,8 +262,10 @@ extern void stabs_alloc(void);
#define VSID_BITS 36
#define VSID_MODULUS ((1UL<<VSID_BITS)-1)
-#define CONTEXT_BITS 20
-#define USER_ESID_BITS 15
+#define CONTEXT_BITS 19
+#define USER_ESID_BITS 16
+
+#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@@ -302,8 +307,7 @@ typedef unsigned long mm_context_id_t;
typedef struct {
mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
- pgd_t *huge_pgdir;
- u16 htlb_segs; /* bitmask */
+ u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;
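The context/ESID split moves from 20+15 to 19+16 bits, and USER_ESID_BITS together with the existing 256 MB segment shift determines how much address space a process can actually map. Assuming the usual SID_SHIFT of 28 (defined elsewhere in this header), a quick check that the new figures line up with the 16 TB TASK_SIZE_USER64 later in this merge:

#include <stdio.h>

/* Values from the patch; a SID_SHIFT of 28 (256 MB segments) is assumed here. */
#define SID_SHIFT       28
#define USER_ESID_BITS  16
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))

int main(void)
{
        /* 2^44 bytes = 16 TB, matching the new TASK_SIZE_USER64 in processor.h */
        printf("USER_VSID_RANGE = 0x%lx (%lu TB)\n",
               USER_VSID_RANGE, USER_VSID_RANGE >> 40);
        return 0;
}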
diff --git a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h
index bfb7caa3..d2afe64 100644
--- a/include/asm-ppc64/naca.h
+++ b/include/asm-ppc64/naca.h
@@ -12,8 +12,6 @@
#include <asm/types.h>
-#ifndef __ASSEMBLY__
-
struct naca_struct {
/* Kernel only data - undefined for user space */
void *xItVpdAreas; /* VPD Data 0x00 */
@@ -23,9 +21,4 @@ struct naca_struct {
extern struct naca_struct naca;
-#endif /* __ASSEMBLY__ */
-
-#define NACA_PAGE 0x4
-#define NACA_PHYS_ADDR (NACA_PAGE<<PAGE_SHIFT)
-
#endif /* _NACA_H */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a5893a3..a79a08d 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -37,39 +37,45 @@
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
-#define TASK_HPAGE_END ASM_CONST(0x0000018000000000)
+#define HTLB_AREA_SHIFT 40
+#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
+#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- (1U << GET_ESID(addr))) & 0xffff)
+#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
+ - (1U << GET_HTLB_AREA(addr))) & 0xffff)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define touches_hugepage_low_range(mm, addr, len) \
- (LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
-#define touches_hugepage_high_range(addr, len) \
- (((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+ (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+#define touches_hugepage_high_range(mm, addr, len) \
+ (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
#define __within_hugepage_low_range(addr, len, segmask) \
((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
__within_hugepage_low_range((addr), (len), \
- current->mm->context.htlb_segs)
-#define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \
- && ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr)))
+ current->mm->context.low_htlb_areas)
+#define __within_hugepage_high_range(addr, len, zonemask) \
+ ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+#define within_hugepage_high_range(addr, len) \
+ __within_hugepage_high_range((addr), (len), \
+ current->mm->context.high_htlb_areas)
#define is_hugepage_only_range(mm, addr, len) \
- (touches_hugepage_high_range((addr), (len)) || \
+ (touches_hugepage_high_range((mm), (addr), (len)) || \
touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define in_hugepage_area(context, addr) \
(cpu_has_feature(CPU_FTR_16M_PAGE) && \
- ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
+ ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
( ((addr) < 0x100000000L) && \
- ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )
+ ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
#else /* !CONFIG_HUGETLB_PAGE */
@@ -125,36 +131,42 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
* Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
*/
typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
+#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
+#define __pte(x) ((pte_t) { (x) })
+#define __pmd(x) ((pmd_t) { (x) })
+#define __pud(x) ((pud_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
#else
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
+typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
+#define pud_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
+#define __pud(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
@@ -208,9 +220,6 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
#define USER_REGION_ID (0UL)
#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
-#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
-
#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
#ifdef CONFIG_DISCONTIGMEM
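The hugepage changes above replace the fixed 1 TB to 1.5 TB window with per-mm bitmasks: low_htlb_areas marks 256 MB segments below 4 GB, while high_htlb_areas marks 1 TB areas selected by bits 40 and up of the address. A standalone sketch of the new HTLB_AREA_MASK computation, with the macro copied from the patch:

#include <stdio.h>

#define HTLB_AREA_SHIFT 40
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)

/* Bitmask of the 1 TB areas touched by [addr, addr+len), as in the patch */
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
                                    - (1U << GET_HTLB_AREA(addr))) & 0xffff)

int main(void)
{
        unsigned long addr = 3UL << HTLB_AREA_SHIFT;   /* start of area 3 */
        unsigned long len  = 2UL << HTLB_AREA_SHIFT;   /* spans areas 3 and 4 */

        /* expect bits 3 and 4 set: 0x18 */
        printf("HTLB_AREA_MASK = 0x%x\n", HTLB_AREA_MASK(addr, len));
        return 0;
}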
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 4fc4b73..26bc49c 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -6,7 +6,12 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
-extern kmem_cache_t *zero_cache;
+extern kmem_cache_t *pgtable_cache[];
+
+#define PTE_CACHE_NUM 0
+#define PMD_CACHE_NUM 1
+#define PUD_CACHE_NUM 1
+#define PGD_CACHE_NUM 0
/*
* This program is free software; you can redistribute it and/or
@@ -15,30 +20,40 @@ extern kmem_cache_t *zero_cache;
* 2 of the License, or (at your option) any later version.
*/
-static inline pgd_t *
-pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(zero_cache, GFP_KERNEL);
+ return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}
-static inline void
-pgd_free(pgd_t *pgd)
+static inline void pgd_free(pgd_t *pgd)
{
- kmem_cache_free(zero_cache, pgd);
+ kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+ kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}
#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
}
-static inline void
-pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
{
- kmem_cache_free(zero_cache, pmd);
+ kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
@@ -47,44 +62,58 @@ pmd_free(pmd_t *pmd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
- if (pte)
- return virt_to_page(pte);
- return NULL;
+ return virt_to_page(pte_alloc_one_kernel(mm, address));
}
static inline void pte_free_kernel(pte_t *pte)
{
- kmem_cache_free(zero_cache, pte);
+ kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}
static inline void pte_free(struct page *ptepage)
{
- kmem_cache_free(zero_cache, page_address(ptepage));
+ pte_free_kernel(page_address(ptepage));
}
-struct pte_freelist_batch
+#define PGF_CACHENUM_MASK 0xf
+
+typedef struct pgtable_free {
+ unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+ unsigned long mask)
{
- struct rcu_head rcu;
- unsigned int index;
- struct page * pages[0];
-};
+ BUG_ON(cachenum > PGF_CACHENUM_MASK);
-#define PTE_FREELIST_SIZE ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
- sizeof(struct page *))
+ return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
-extern void pte_free_now(struct page *ptepage);
-extern void pte_free_submit(struct pte_freelist_batch *batch);
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+ void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+ int cachenum = pgf.val & PGF_CACHENUM_MASK;
-DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+ kmem_cache_free(pgtable_cache[cachenum], p);
+}
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
-#define __pmd_free_tlb(tlb, pmd) __pte_free_tlb(tlb, virt_to_page(pmd))
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+ PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pud) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+ PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#define check_pgt_cache() do { } while (0)
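The table-free paths now funnel through pgtable_free_tlb(), which must remember which slab cache a page-table page came from until the deferred free. Because each table is aligned to its power-of-two size, the cache number can be stashed in the low bits of the pointer itself, which is what pgtable_free_cache()/pgtable_free() do. A userspace sketch of that packing, using aligned_alloc in place of the slab caches:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHENUM_MASK 0xfUL
#define TABLE_SIZE    4096UL        /* stand-in for PTE_TABLE_SIZE etc. */

typedef struct { unsigned long val; } pgtable_free_t;

/* Pack a table pointer and a cache number into one word; the pointer is
 * TABLE_SIZE-aligned, so its low bits are known to be zero. */
static pgtable_free_t pack(void *p, unsigned long cachenum)
{
        assert(cachenum <= CACHENUM_MASK);
        return (pgtable_free_t){ .val = ((unsigned long)p & ~(TABLE_SIZE - 1))
                                        | cachenum };
}

static void unpack(pgtable_free_t pgf, void **p, unsigned long *cachenum)
{
        *p = (void *)(pgf.val & ~CACHENUM_MASK);
        *cachenum = pgf.val & CACHENUM_MASK;
}

int main(void)
{
        void *table = aligned_alloc(TABLE_SIZE, TABLE_SIZE);
        pgtable_free_t pgf = pack(table, 1);    /* e.g. PMD_CACHE_NUM */
        void *p;
        unsigned long cache;

        unpack(pgf, &p, &cache);
        printf("pointer preserved: %d, cache = %lu\n", p == table, cache);
        free(table);
        return 0;
}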
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 46cf61c..5ea952a 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -15,19 +15,24 @@
#include <asm/tlbflush.h>
#endif /* __ASSEMBLY__ */
-#include <asm-generic/pgtable-nopud.h>
-
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
* each entry by assuming that each entry is page aligned.
*/
#define PTE_INDEX_SIZE 9
-#define PMD_INDEX_SIZE 10
-#define PGD_INDEX_SIZE 10
+#define PMD_INDEX_SIZE 7
+#define PUD_INDEX_SIZE 7
+#define PGD_INDEX_SIZE 9
+
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
/* PMD_SHIFT determines what a second-level page table entry can map */
@@ -35,8 +40,13 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
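With the generic nopud header dropped, the page table becomes a real four-level tree; the index sizes change from 9/10/10 to 9/7/7/9 so each level fits its new 64-bit entries, and (assuming the usual 4 KB PAGE_SHIFT of 12 and PMD_SHIFT = PAGE_SHIFT + PTE_INDEX_SIZE from elsewhere in the header) the levels together still cover 44 bits of effective address. A quick check of the resulting shifts:

#include <stdio.h>

#define PAGE_SHIFT     12          /* assumed 4 KB pages */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

#define PMD_SHIFT   (PAGE_SHIFT + PTE_INDEX_SIZE)   /* 21: a PMD entry maps 2 MB */
#define PUD_SHIFT   (PMD_SHIFT + PMD_INDEX_SIZE)    /* 28: a PUD entry maps 256 MB */
#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)    /* 35: a PGD entry maps 32 GB */

int main(void)
{
        int eaddr_bits = PGDIR_SHIFT + PGD_INDEX_SIZE;   /* PGTABLE_EADDR_SIZE */

        printf("PMD_SHIFT=%d PUD_SHIFT=%d PGDIR_SHIFT=%d range=2^%d (%lu TB)\n",
               PMD_SHIFT, PUD_SHIFT, PGDIR_SHIFT, eaddr_bits,
               (1UL << eaddr_bits) >> 40);
        return 0;
}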
@@ -45,15 +55,23 @@
/*
* Size of EA range mapped by our pagetables.
*/
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
- PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+ PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
/*
* Define the address range of the vmalloc VM area.
*/
#define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE (0x10000000000UL)
+#define VMALLOC_SIZE (0x80000000000UL)
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
@@ -154,8 +172,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#ifndef __ASSEMBLY__
int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
#endif /* __ASSEMBLY__ */
#define HAVE_ARCH_UNMAPPED_AREA
@@ -163,7 +179,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
#else
#define hash_huge_page(mm,a,ea,vsid,local) -1
-#define hugetlb_mm_free_pgd(mm) do {} while (0)
#endif
@@ -197,39 +212,45 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pmd_set(pmdp, ptep) \
- (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
+#define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) == 0)
#define pmd_present(pmd) (pmd_val(pmd) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd) (pmd_val(pmd))
#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
-#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (unsigned long)(pmdp))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) ((pud_val(pud)) == 0UL)
-#define pud_present(pud) (pud_val(pud) != 0UL)
-#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define pud_page(pud) (__bpn_to_ba(pud_val(pud)))
+#define pud_bad(pud) ((pud_val(pud)) == 0)
+#define pud_present(pud) (pud_val(pud) != 0)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
+#define pud_page(pud) (pud_val(pud))
+
+#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (pgd_val(pgd) == 0)
+#define pgd_present(pgd) (pgd_val(pgd) != 0)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
+#define pgd_page(pgd) (pgd_val(pgd))
/*
* Find an entry in a page-table-directory. We combine the address region
* (the high order N bits) and the pgd portion of the address.
*/
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-/* Find an entry in the second-level page table.. */
+#define pud_offset(pgdp, addr) \
+ (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
#define pmd_offset(pudp,addr) \
- ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+ (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-/* Find an entry in the third-level page table.. */
#define pte_offset_kernel(dir,addr) \
- ((pte_t *) pmd_page_kernel(*(dir)) \
- + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+ (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
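Since upper-level entries now hold the kernel virtual address of the next table directly (the old __ba_to_bpn/__bpn_to_ba page-number encoding is gone), walking to a PTE is four successive index-and-dereference steps: pgd_offset, pud_offset, pmd_offset, pte_offset_kernel. A simplified, self-contained model of such a walk, with toy tables and the index widths from this patch (not the kernel's actual code):

#include <stdio.h>
#include <stdlib.h>

enum { PAGE_SHIFT = 12, PTE_BITS = 9, PMD_BITS = 7, PUD_BITS = 7, PGD_BITS = 9 };

static unsigned long idx(unsigned long addr, int shift, int bits)
{
        return (addr >> shift) & ((1UL << bits) - 1);
}

int main(void)
{
        /* one toy table per level; empty entries read back as NULL/0 */
        void **pgd = calloc(1UL << PGD_BITS, sizeof(void *));
        void **pud = calloc(1UL << PUD_BITS, sizeof(void *));
        void **pmd = calloc(1UL << PMD_BITS, sizeof(void *));
        unsigned long *pte = calloc(1UL << PTE_BITS, sizeof(unsigned long));

        int pmd_shift = PAGE_SHIFT + PTE_BITS;           /* 21 */
        int pud_shift = pmd_shift + PMD_BITS;            /* 28 */
        int pgd_shift = pud_shift + PUD_BITS;            /* 35 */
        unsigned long addr = 0xfedcba98000UL;            /* some address below 2^44 */

        /* populate the path for addr, mirroring pgd_set/pud_set/pmd_set */
        pgd[idx(addr, pgd_shift, PGD_BITS)] = pud;
        pud[idx(addr, pud_shift, PUD_BITS)] = pmd;
        pmd[idx(addr, pmd_shift, PMD_BITS)] = pte;
        pte[idx(addr, PAGE_SHIFT, PTE_BITS)] = 0xabcd;   /* the "PTE" */

        /* walk: pgd_offset -> pud_offset -> pmd_offset -> pte_offset_kernel */
        void **pudp = pgd[idx(addr, pgd_shift, PGD_BITS)];
        void **pmdp = pudp[idx(addr, pud_shift, PUD_BITS)];
        unsigned long *ptep = pmdp[idx(addr, pmd_shift, PMD_BITS)];

        printf("pte = 0x%lx\n", ptep[idx(addr, PAGE_SHIFT, PTE_BITS)]);

        free(pgd); free(pud); free(pmd); free(pte);
        return 0;
}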
@@ -458,23 +479,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
-/*
- * Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use. So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot.
- */
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- do { } while (0)
+ free_pgd_range(tlb, addr, end, floor, ceiling)
/*
* This gets called at the end of handling a page fault, when
diff --git a/include/asm-ppc64/pmc.h b/include/asm-ppc64/pmc.h
index c924748..d1d297d 100644
--- a/include/asm-ppc64/pmc.h
+++ b/include/asm-ppc64/pmc.h
@@ -26,4 +26,6 @@ typedef void (*perf_irq_t)(struct pt_regs *);
int reserve_pmc_hardware(perf_irq_t new_perf_irq);
void release_pmc_hardware(void);
+void power4_enable_pmcs(void);
+
#endif /* _PPC64_PMC_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 352306c..50b14c0 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -382,8 +382,8 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
-/* 64-bit user address space is 41-bits (2TBs user VM) */
-#define TASK_SIZE_USER64 (0x0000020000000000UL)
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
/*
* 32-bit user address space is 4GB - 1 page
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index 04b1a84..dc5330b 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -22,13 +22,15 @@
#define RELOC(x) (*PTRRELOC(&(x)))
/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* 4: version, 4: total size */
-#define OF_DT_BEGIN_NODE 0x1 /* Start node: full name */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size, content */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
#define OF_DT_END 0x9
-#define OF_DT_VERSION 1
+#define OF_DT_VERSION 0x10
/*
* This is what gets passed to the kernel by prom_init or kexec
@@ -54,7 +56,9 @@ struct boot_param_header
u32 version; /* format version */
u32 last_comp_version; /* last compatible version */
/* version 2 fields below */
- u32 boot_cpuid_phys; /* Which physical CPU id we're booting on */
+ u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ u32 dt_strings_size; /* size of the DT strings block */
};
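The flattened device-tree format gains an OF_DT_NOP token (so a boot wrapper can blank out an entry in place without reshuffling the blob), a version-3 dt_strings_size header field, and a version constant of 0x10. A toy walker over a hand-built token stream illustrates how the structure block nests; the real blob additionally carries names, lengths, string offsets and padding that are omitted here:

#include <stdio.h>

#define OF_DT_BEGIN_NODE 0x1
#define OF_DT_END_NODE   0x2
#define OF_DT_PROP       0x3
#define OF_DT_NOP        0x4    /* skipped by parsers */
#define OF_DT_END        0x9

int main(void)
{
        /* begin root, one property, a nop'd-out entry, end root, end of tree */
        unsigned int tokens[] = { OF_DT_BEGIN_NODE, OF_DT_PROP, OF_DT_NOP,
                                  OF_DT_END_NODE, OF_DT_END };
        int depth = 0;

        for (unsigned int i = 0; tokens[i] != OF_DT_END; i++) {
                switch (tokens[i]) {
                case OF_DT_BEGIN_NODE: depth++; break;
                case OF_DT_END_NODE:   depth--; break;
                case OF_DT_NOP:        break;                      /* ignore */
                case OF_DT_PROP:       printf("property at depth %d\n", depth); break;
                }
        }
        return 0;
}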
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 98d120c..b9e1835 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -88,7 +88,7 @@ DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
#ifdef CONFIG_XMON
-extern void xmon_init(void);
+extern void xmon_init(int enable);
#endif
#else
@@ -302,5 +302,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define arch_align_stack(x) (x)
+extern unsigned long reloc_offset(void);
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-ppc64/vio.h b/include/asm-ppc64/vio.h
index 20cd98e..a82e87c 100644
--- a/include/asm-ppc64/vio.h
+++ b/include/asm-ppc64/vio.h
@@ -56,6 +56,9 @@ const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length);
int vio_get_irq(struct vio_dev *dev);
int vio_enable_interrupts(struct vio_dev *dev);
int vio_disable_interrupts(struct vio_dev *dev);
+extern struct vio_dev * __devinit vio_register_device_common(
+ struct vio_dev *viodev, char *name, char *type,
+ uint32_t unit_address, struct iommu_table *iommu_table);
extern struct dma_mapping_ops vio_dma_ops;
@@ -95,9 +98,16 @@ struct vio_dev {
struct device dev;
};
+extern struct vio_dev vio_bus_device;
+
static inline struct vio_dev *to_vio_dev(struct device *dev)
{
return container_of(dev, struct vio_dev, dev);
}
+extern int vio_bus_init(int (*is_match)(const struct vio_device_id *id,
+ const struct vio_dev *dev),
+ void (*)(struct vio_dev *),
+ void (*)(struct device *));
+
#endif /* _ASM_VIO_H */