author     Russell King <rmk+kernel@arm.linux.org.uk>   2012-05-21 15:15:24 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2012-05-21 15:15:24 +0100
commit     4175160b065e74572819a320dcd34129224a4e1c (patch)
tree       3298e2c9a7c7db33bf28617875e5429e17eec61c
parent     ddf90a2ff2c4a9da99acc898a4afeab3e4251fcd (diff)
parent     0ec8e7aa8f63f0cacd545fcd7f40f93fde2c0e6e (diff)
Merge branch 'misc' into for-linus
Conflicts:
	arch/arm/kernel/ptrace.c
-rw-r--r--  arch/arm/Kconfig                          | 15
-rw-r--r--  arch/arm/boot/compressed/head.S           | 29
-rw-r--r--  arch/arm/common/vic.c                     | 56
-rw-r--r--  arch/arm/include/asm/cacheflush.h         |  6
-rw-r--r--  arch/arm/include/asm/cmpxchg.h            | 73
-rw-r--r--  arch/arm/include/asm/mach/time.h          |  5
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h     |  2
-rw-r--r--  arch/arm/include/asm/ptrace.h             |  5
-rw-r--r--  arch/arm/include/asm/syscall.h            | 93
-rw-r--r--  arch/arm/kernel/head.S                    |  9
-rw-r--r--  arch/arm/kernel/ptrace.c                  | 21
-rw-r--r--  arch/arm/kernel/signal.c                  |  2
-rw-r--r--  arch/arm/kernel/smp_scu.c                 |  3
-rw-r--r--  arch/arm/kernel/thumbee.c                 |  4
-rw-r--r--  arch/arm/kernel/time.c                    | 36
-rw-r--r--  arch/arm/kernel/traps.c                   | 11
-rw-r--r--  arch/arm/mach-tegra/timer.c               |  5
-rw-r--r--  arch/arm/mm/cache-v3.S                    |  1
-rw-r--r--  arch/arm/mm/cache-v4.S                    |  1
-rw-r--r--  arch/arm/mm/cache-v4wb.S                  |  6
-rw-r--r--  arch/arm/mm/cache-v4wt.S                  |  1
-rw-r--r--  arch/arm/mm/cache-v6.S                    | 10
-rw-r--r--  arch/arm/mm/cache-v7.S                    | 10
-rw-r--r--  arch/arm/mm/proc-arm1020.S                |  1
-rw-r--r--  arch/arm/mm/proc-arm1020e.S               |  1
-rw-r--r--  arch/arm/mm/proc-arm1022.S                |  1
-rw-r--r--  arch/arm/mm/proc-arm1026.S                |  1
-rw-r--r--  arch/arm/mm/proc-arm920.S                 |  1
-rw-r--r--  arch/arm/mm/proc-arm922.S                 |  1
-rw-r--r--  arch/arm/mm/proc-arm925.S                 |  1
-rw-r--r--  arch/arm/mm/proc-arm926.S                 |  1
-rw-r--r--  arch/arm/mm/proc-arm940.S                 |  6
-rw-r--r--  arch/arm/mm/proc-arm946.S                 |  1
-rw-r--r--  arch/arm/mm/proc-feroceon.S               |  1
-rw-r--r--  arch/arm/mm/proc-mohawk.S                 |  1
-rw-r--r--  arch/arm/plat-omap/counter_32k.c          |  6
-rw-r--r--  arch/arm/vfp/vfpmodule.c                  | 16
37 files changed, 269 insertions(+), 174 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ed24493..ea2b43c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -11,6 +11,7 @@ config ARM
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@@ -30,6 +31,8 @@ config ARM
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
+ select HARDIRQS_SW_RESEND
+ select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
@@ -126,14 +129,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
-config HARDIRQS_SW_RESEND
- bool
- default y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
config GENERIC_LOCKBREAK
bool
default y
@@ -633,7 +628,6 @@ config ARCH_MMP
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select GPIO_PXA
- select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
select GENERIC_ALLOCATOR
@@ -717,7 +711,6 @@ config ARCH_PXA
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select GPIO_PXA
- select TICK_ONESHOT
select PLAT_PXA
select SPARSE_IRQ
select AUTO_ZRELADDR
@@ -784,7 +777,6 @@ config ARCH_SA1100
select CPU_FREQ
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
- select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
select HAVE_IDE
select NEED_MACH_MEMORY_H
@@ -1562,7 +1554,6 @@ config ARM_ARCH_TIMER
config HAVE_ARM_TWD
bool
depends on SMP
- select TICK_ONESHOT
help
This options enables support for the ARM timer and watchdog unit
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index dc7e8ce..5ad33a4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov r9, r0, lsr #18
mov r9, r9, lsl #18 @ start of RAM
add r10, r9, #0x10000000 @ a reasonable RAM size
- mov r1, #0x12
- orr r1, r1, #3 << 10
+ mov r1, #0x12 @ XN|U + section mapping
+ orr r1, r1, #3 << 10 @ AP=11
add r2, r3, #16384
1: cmp r1, r9 @ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- orrhs r1, r1, #0x08 @ set cacheable
-#else
- orrhs r1, r1, #0x0c @ set cacheable, bufferable
-#endif
- cmp r1, r10 @ if virt > end of RAM
- bichs r1, r1, #0x0c @ clear cacheable, bufferable
+ cmphs r10, r1 @ && end of RAM > virt
+ bic r1, r1, #0x1c @ clear XN|U + C + B
+ orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
+ orrhs r1, r1, r6 @ set RAM section settings
str r1, [r0], #4 @ 1:1 mapping
add r1, r1, #1048576
teq r0, r2
@@ -599,7 +602,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
* so there is no map overlap problem for up to 1 MB compressed kernel.
* If the execution is in RAM then we would only be duplicating the above.
*/
- mov r1, #0x1e
+ orr r1, r6, #0x04 @ ensure B is set for this
orr r1, r1, #3 << 10
mov r2, pc
mov r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
__armv4_mmu_cache_on:
mov r12, lr
#ifdef CONFIG_MMU
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, #0xf @ VMSA
+ movne r6, #CB_BITS | 0x02 @ !XN
blne __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
orrne r0, r0, #1 @ MMU enabled
- movne r1, #-1
+ movne r1, #0xfffffffd @ domain 0 = client
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
#endif
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
__fa526_cache_on:
mov r12, lr
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
__arm6_mmu_cache_on:
mov r12, lr
+ mov r6, #CB_BITS | 0x12 @ U
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
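
Note (illustrative, not part of the patch): the CB_BITS/r6 values above compose the short-descriptor 1MB section entries that __setup_mmu writes; roughly, using the usual bit names from the ARM short-descriptor format:

/* Illustrative sketch only.  0x12 = section entry + XN (v6+) / "U" (v4),
 * CB_BITS = cacheable and/or bufferable, AP=11 grants full access. */
#define SECTION_ENTRY   0x02        /* bits[1:0] = 0b10                 */
#define SECTION_XN_U    0x10        /* bit 4                            */
#define SECTION_B       0x04        /* bit 2: bufferable                */
#define SECTION_C       0x08        /* bit 3: cacheable                 */
#define SECTION_AP_RW   (3 << 10)   /* AP = 0b11                        */

/* ARMv7 RAM sections (r6 = CB_BITS | 0x02): cached, executable. */
unsigned int ram_section    = SECTION_ENTRY | SECTION_C | SECTION_B | SECTION_AP_RW;
/* Non-RAM sections: uncached and marked execute-never. */
unsigned int nonram_section = SECTION_ENTRY | SECTION_XN_U | SECTION_AP_RW;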
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7e288f9..e0d5388 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -39,6 +39,7 @@
* struct vic_device - VIC PM device
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
* @int_select: Save for VIC_INT_SELECT.
@@ -50,6 +51,7 @@
struct vic_device {
void __iomem *base;
int irq;
+ u32 valid_sources;
u32 resume_sources;
u32 resume_irqs;
u32 int_select;
@@ -164,10 +166,32 @@ static int __init vic_pm_init(void)
late_initcall(vic_pm_init);
#endif /* CONFIG_PM */
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct vic_device *v = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(v->valid_sources & (1 << hwirq)))
+ return -ENOTSUPP;
+ irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+ irq_set_chip_data(irq, v->base);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ return 0;
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+ .map = vic_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
/**
* vic_register() - Register a VIC.
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
* @resume_sources: bitmask of interrupts allowed for resume sources.
* @node: The device tree node associated with the VIC.
*
@@ -178,7 +202,8 @@ late_initcall(vic_pm_init);
* This also configures the IRQ domain for the VIC.
*/
static void __init vic_register(void __iomem *base, unsigned int irq,
- u32 resume_sources, struct device_node *node)
+ u32 valid_sources, u32 resume_sources,
+ struct device_node *node)
{
struct vic_device *v;
@@ -189,11 +214,12 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
v = &vic_devices[vic_id];
v->base = base;
+ v->valid_sources = valid_sources;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
- v->domain = irq_domain_add_legacy(node, 32, irq, 0,
- &irq_domain_simple_ops, v);
+ v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
+ &vic_irqdomain_ops, v);
}
static void vic_ack_irq(struct irq_data *d)
@@ -287,23 +313,6 @@ static void __init vic_clear_interrupts(void __iomem *base)
}
}
-static void __init vic_set_irq_sources(void __iomem *base,
- unsigned int irq_start, u32 vic_sources)
-{
- unsigned int i;
-
- for (i = 0; i < 32; i++) {
- if (vic_sources & (1 << i)) {
- unsigned int irq = irq_start + i;
-
- irq_set_chip_and_handler(irq, &vic_chip,
- handle_level_irq);
- irq_set_chip_data(irq, base);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- }
-}
-
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
@@ -338,8 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
- vic_set_irq_sources(base, irq_start, vic_sources);
- vic_register(base, irq_start, 0, node);
+ vic_register(base, irq_start, vic_sources, 0, node);
}
void __init __vic_init(void __iomem *base, unsigned int irq_start,
@@ -379,9 +387,7 @@ void __init __vic_init(void __iomem *base, unsigned int irq_start,
vic_init2(base);
- vic_set_irq_sources(base, irq_start, vic_sources);
-
- vic_register(base, irq_start, resume_sources, node);
+ vic_register(base, irq_start, vic_sources, resume_sources, node);
}
/**
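
Note (illustrative, not part of the patch): the legacy IRQ domain is now sized from the valid-sources mask instead of being hard-coded to 32, and the new map callback refuses hwirqs whose bit is clear. A rough sketch of the sizing:

#include <linux/bitops.h>

u32 valid_sources = 0x0000f00f;                 /* hypothetical VIC wiring: sources 0-3, 12-15 */
unsigned int domain_size = fls(valid_sources);  /* = 16, so the domain covers hwirq 0..15      */
/* vic_irqdomain_map() then returns -ENOTSUPP for e.g. hwirq 5, so only the
 * wired sources get a handler and are flagged IRQF_VALID | IRQF_PROBE.      */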
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c..004c1bc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -101,7 +101,7 @@ struct cpu_cache_fns {
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
- void (*coherent_user_range)(unsigned long, unsigned long);
+ int (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
/*
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
/*
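
Note (illustrative, not part of the patch): flush_cache_user_range() backs the ARM-private sys_cacheflush call, which userspace JITs usually reach through the compiler built-in; with this series that path can now report -EFAULT for unmapped addresses. A minimal userspace sketch:

#include <string.h>

/* Copy freshly generated instructions into an executable buffer and make
 * them visible to the instruction cache.  On ARM Linux the built-in ends
 * up in sys_cacheflush, i.e. flush_cache_user_range() above. */
void publish_code(void *exec_buf, const void *code, size_t len)
{
	memcpy(exec_buf, code, len);
	__builtin___clear_cache((char *)exec_buf, (char *)exec_buf + len);
}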
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index d41d7cb..7eb18c1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
-#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- register unsigned long long oldval asm("r0");
- register unsigned long long __old asm("r2") = old;
- register unsigned long long __new asm("r4") = new;
- unsigned long res;
-
- do {
- asm volatile(
- " @ __cmpxchg8\n"
- " ldrexd %1, %H1, [%2]\n"
- " mov %0, #0\n"
- " teq %1, %3\n"
- " teqeq %H1, %H3\n"
- " strexdeq %0, %4, %H4, [%2]\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (__old), "r" (__new)
- : "memory", "cc");
- } while (res);
-
- return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long ret;
-
- smp_mb();
- ret = __cmpxchg64(ptr, old, new);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg64(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
+ atomic64_t, \
+ counter), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
+ local64_t, \
+ a), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
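
Note (illustrative, not part of the patch): the container_of() construction above assumes the pointer handed to cmpxchg64()/cmpxchg64_local() is the counter member of an atomic64_t (respectively the a member of a local64_t). A usage sketch under that assumption:

#include <linux/atomic.h>

static atomic64_t seq = ATOMIC64_INIT(0);

/* Returns true if the 64-bit sequence value was advanced from old to new. */
static bool advance_seq(u64 old, u64 new)
{
	/* container_of(&seq.counter, atomic64_t, counter) == &seq */
	return cmpxchg64(&seq.counter, old, new) == old;
}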
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index f73c908..6ca945f 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -42,4 +42,9 @@ struct sys_timer {
extern void timer_tick(void);
+struct timespec;
+typedef void (*clock_access_fn)(struct timespec *);
+extern int register_persistent_clock(clock_access_fn read_boot,
+ clock_access_fn read_persistent);
+
#endif
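
Note (illustrative, not part of the patch): register_persistent_clock() lets a platform hand its persistent/boot clock readers to the core once, instead of overriding the weak read_persistent_clock(); the Tegra and OMAP hunks further down do exactly this. A minimal sketch with a hypothetical SoC callback:

#include <linux/init.h>
#include <linux/time.h>
#include <asm/mach/time.h>

/* Hypothetical platform callback: fill *ts from an always-on counter. */
static void soc_read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

static void __init soc_timer_init(void)
{
	/* No separate boot clock on this SoC, so pass NULL for read_boot. */
	register_persistent_clock(NULL, soc_read_persistent_clock);
}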
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 759af70..b249035 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@
*/
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
-#define L_PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
-#define L_PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 451808b..355ece5 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -249,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->ARM_sp;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
new file mode 100644
index 0000000..c334a23
--- /dev/null
+++ b/arch/arm/include/asm/syscall.h
@@ -0,0 +1,93 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return task_thread_info(task)->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ARM_r0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ if (i + n > SYSCALL_MAX_ARGS) {
+ unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+ unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+ pr_warning("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+ memset(args_bad, 0, n_bad * sizeof(args[0]));
+ n = SYSCALL_MAX_ARGS - i;
+ }
+
+ if (i == 0) {
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ if (i + n > SYSCALL_MAX_ARGS) {
+ pr_warning("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+ n = SYSCALL_MAX_ARGS - i;
+ }
+
+ if (i == 0) {
+ regs->ARM_ORIG_r0 = args[0];
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
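
Note (illustrative, not part of the patch): these accessors implement the asm-generic/syscall.h contract that HAVE_ARCH_TRACEHOOK users (tracing, audit, seccomp-style filters) rely on. A hypothetical consumer, sketched only to show the calling convention:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical tracer hook: dump the syscall number and first two args. */
static void trace_entry(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	int nr = syscall_get_nr(task, regs);

	/* args[0] is taken from ARM_ORIG_r0, the rest from r1..r5. */
	syscall_get_arguments(task, regs, 0, 6, args);
	pr_debug("sys_%d(%lx, %lx, ...)\n", nr, args[0], args[1]);
}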
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 3bf0c7f..835898e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -277,10 +277,6 @@ __create_page_tables:
mov r3, r3, lsl #PMD_ORDER
add r0, r4, r3
- rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
- cmp r3, #0x0800 @ limit to 512MB
- movhi r3, #0x0800
- add r6, r0, r3
mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
orr r3, r7, r3, lsl #SECTION_SHIFT
@@ -289,13 +285,10 @@ __create_page_tables:
#else
orr r3, r3, #PMD_SECT_XN
#endif
-1: str r3, [r0], #4
+ str r3, [r0], #4
#ifdef CONFIG_ARM_LPAE
str r7, [r0], #4
#endif
- add r3, r3, #1 << SECTION_SHIFT
- cmp r0, r6
- blo 1b
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
/* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 9650c14..14e3826 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -24,6 +24,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
+#include <linux/tracehook.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -918,8 +919,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
- if (!(current->ptrace & PT_PTRACED))
- return scno;
current_thread_info()->syscall = scno;
@@ -930,19 +929,11 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
ip = regs->ARM_ip;
regs->ARM_ip = why;
- /* the 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ if (why)
+ tracehook_report_syscall_exit(regs, 0);
+ else if (tracehook_report_syscall_entry(regs))
+ current_thread_info()->syscall = -1;
+
regs->ARM_ip = ip;
return current_thread_info()->syscall;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index d68d1b6..73d9a42 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -589,6 +589,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
*/
block_sigmask(ka, sig);
+ tracehook_signal_handler(sig, info, ka, regs, 0);
+
return 0;
}
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 8f5dd79..b9f015e 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -74,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
- int cpu = smp_processor_id();
+ int cpu = cpu_logical_map(smp_processor_id());
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index aab8997..7b8403b 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
@@ -67,8 +68,7 @@ static int __init thumbee_init(void)
if (cpu_arch < CPU_ARCH_ARMv7)
return 0;
- /* processor feature register 0 */
- asm("mrc p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+ pfr0 = read_cpuid_ext(CPUID_EXT_PFR0);
if ((pfr0 & 0x0000f000) != 0x00001000)
return 0;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index fe31b22..af2afb0 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -110,6 +110,42 @@ void timer_tick(void)
}
#endif
+static void dummy_clock_access(struct timespec *ts)
+{
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
+}
+
+static clock_access_fn __read_persistent_clock = dummy_clock_access;
+static clock_access_fn __read_boot_clock = dummy_clock_access;;
+
+void read_persistent_clock(struct timespec *ts)
+{
+ __read_persistent_clock(ts);
+}
+
+void read_boot_clock(struct timespec *ts)
+{
+ __read_boot_clock(ts);
+}
+
+int __init register_persistent_clock(clock_access_fn read_boot,
+ clock_access_fn read_persistent)
+{
+ /* Only allow the clockaccess functions to be registered once */
+ if (__read_persistent_clock == dummy_clock_access &&
+ __read_boot_clock == dummy_clock_access) {
+ if (read_boot)
+ __read_boot_clock = read_boot;
+ if (read_persistent)
+ __read_persistent_clock = read_persistent;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
static int timer_suspend(void)
{
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7784547..3647170 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -479,14 +479,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
-static inline void
+static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
struct mm_struct *mm = current->active_mm;
struct vm_area_struct *vma;
if (end < start || flags)
- return;
+ return -EINVAL;
down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
@@ -496,9 +496,11 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end > vma->vm_end)
end = vma->vm_end;
- flush_cache_user_range(vma, start, end);
+ up_read(&mm->mmap_sem);
+ return flush_cache_user_range(start, end);
}
up_read(&mm->mmap_sem);
+ return -EINVAL;
}
/*
@@ -544,8 +546,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
* the specified region).
*/
case NR(cacheflush):
- do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
- return 0;
+ return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
case NR(usr26):
if (!(elf_hwcap & HWCAP_26BIT))
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 1eed8d4..315672c 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -124,7 +124,7 @@ static u64 tegra_rtc_read_ms(void)
}
/*
- * read_persistent_clock - Return time from a persistent clock.
+ * tegra_read_persistent_clock - Return time from a persistent clock.
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
@@ -133,7 +133,7 @@ static u64 tegra_rtc_read_ms(void)
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
-void read_persistent_clock(struct timespec *ts)
+static void tegra_read_persistent_clock(struct timespec *ts)
{
u64 delta;
struct timespec *tsp = &persistent_ts;
@@ -243,6 +243,7 @@ static void __init tegra_init_timer(void)
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_register_device(&tegra_clockevent);
tegra_twd_init();
+ register_persistent_clock(NULL, tegra_read_persistent_clock);
}
struct sys_timer tegra_timer = {
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f2..52e35f3 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -78,6 +78,7 @@ ENTRY(v3_coherent_kern_range)
* - end - virtual end address
*/
ENTRY(v3_coherent_user_range)
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7a..022135d 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -88,6 +88,7 @@ ENTRY(v4_coherent_kern_range)
* - end - virtual end address
*/
ENTRY(v4_coherent_user_range)
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c141..8f1eeae 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -167,9 +167,9 @@ ENTRY(v4wb_coherent_user_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov ip, #0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467..b34a5f9 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -125,6 +125,7 @@ ENTRY(v4wt_coherent_user_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a..4b10760 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@@ -135,7 +136,6 @@ ENTRY(v6_coherent_user_range)
1:
USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
add r0, r0, #CACHE_LINE_SIZE
-2:
cmp r0, r1
blo 1b
#endif
@@ -154,13 +154,11 @@ ENTRY(v6_coherent_user_range)
/*
* Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
*/
9001:
- mov r0, r0, lsr #12
- mov r0, r0, lsl #12
- add r0, r0, #4096
- b 2b
+ mov r0, #-EFAULT
+ mov pc, lr
UNWIND(.fnend )
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3d..39e3fb3 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@@ -198,7 +199,6 @@ ENTRY(v7_coherent_user_range)
add r12, r12, r2
cmp r12, r1
blo 2b
-3:
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
@@ -208,13 +208,11 @@ ENTRY(v7_coherent_user_range)
/*
* Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
*/
9001:
- mov r12, r12, lsr #12
- mov r12, r12, lsl #12
- add r12, r12, #4096
- b 3b
+ mov r0, #-EFAULT
+ mov pc, lr
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2349513..0650bb8 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -241,6 +241,7 @@ ENTRY(arm1020_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index c244b06..4188478 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -235,6 +235,7 @@ ENTRY(arm1020e_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 38fe22e..33c6882 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -224,6 +224,7 @@ ENTRY(arm1022_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3eb9c3c..fbc1d5f 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -218,6 +218,7 @@ ENTRY(arm1026_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index cb941ae..1a8c138 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -210,6 +210,7 @@ ENTRY(arm920_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4ec0e07..4c44d7e 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -212,6 +212,7 @@ ENTRY(arm922_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 9dccd9a..ec5b118 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -258,6 +258,7 @@ ENTRY(arm925_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 820259b..c31e62c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -221,6 +221,7 @@ ENTRY(arm926_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 9fdc0a17..a613a7d 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -160,7 +160,7 @@ ENTRY(arm940_coherent_user_range)
* - size - region size
*/
ENTRY(arm940_flush_kern_dcache_area)
- mov ip, #0
+ mov r0, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
@@ -168,8 +168,8 @@ ENTRY(arm940_flush_kern_dcache_area)
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 4
bcs 1b @ segments 7 to 0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f684cfe..9f4f299 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -190,6 +190,7 @@ ENTRY(arm946_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index ba3c500..23a8e4c 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -232,6 +232,7 @@ ENTRY(feroceon_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index cdfedc5..b047546 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -193,6 +193,7 @@ ENTRY(mohawk_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 5068fe5..44ae077 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clocksource.h>
+#include <asm/mach/time.h>
#include <asm/sched_clock.h>
#include <plat/hardware.h>
@@ -43,7 +44,7 @@ static u32 notrace omap_32k_read_sched_clock(void)
}
/**
- * read_persistent_clock - Return time from a persistent clock.
+ * omap_read_persistent_clock - Return time from a persistent clock.
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
@@ -52,7 +53,7 @@ static u32 notrace omap_32k_read_sched_clock(void)
static struct timespec persistent_ts;
static cycles_t cycles, last_cycles;
static unsigned int persistent_mult, persistent_shift;
-void read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock(struct timespec *ts)
{
unsigned long long nsecs;
cycles_t delta;
@@ -116,6 +117,7 @@ int __init omap_init_clocksource_32k(void)
printk(err, "32k_counter");
setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+ register_persistent_clock(NULL, omap_read_persistent_clock);
}
return 0;
}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index b0197b2..58696192 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -241,11 +241,11 @@ static void vfp_panic(char *reason, u32 inst)
{
int i;
- printk(KERN_ERR "VFP: Error: %s\n", reason);
- printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+ pr_err("VFP: Error: %s\n", reason);
+ pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
fmrx(FPEXC), fmrx(FPSCR), inst);
for (i = 0; i < 32; i += 2)
- printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+ pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
@@ -452,7 +452,7 @@ static int vfp_pm_suspend(void)
/* if vfp is on, then save state for resumption */
if (fpexc & FPEXC_EN) {
- printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+ pr_debug("%s: saving vfp state\n", __func__);
vfp_save_state(&ti->vfpstate, fpexc);
/* disable, just in case */
@@ -664,16 +664,16 @@ static int __init vfp_init(void)
barrier();
vfp_vector = vfp_null_entry;
- printk(KERN_INFO "VFP support v0.3: ");
+ pr_info("VFP support v0.3: ");
if (VFP_arch)
- printk("not present\n");
+ pr_cont("not present\n");
else if (vfpsid & FPSID_NODOUBLE) {
- printk("no double precision support\n");
+ pr_cont("no double precision support\n");
} else {
hotcpu_notifier(vfp_hotplug, 0);
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
- printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+ pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,