Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/Makefile        |   2
-rw-r--r--   arch/powerpc/kernel/asm-offsets.c   |   1
-rw-r--r--   arch/powerpc/kernel/entry_32.S      |  35
-rw-r--r--   arch/powerpc/kernel/head_64.S       |  49
-rw-r--r--   arch/powerpc/kernel/idle.c          |   4
-rw-r--r--   arch/powerpc/kernel/idle_6xx.S      |  63
-rw-r--r--   arch/powerpc/kernel/idle_power4.S   |  10
-rw-r--r--   arch/powerpc/kernel/iommu.c         |  36
-rw-r--r--   arch/powerpc/kernel/irq.c           |  36
-rw-r--r--   arch/powerpc/kernel/kprobes.c       |  14
-rw-r--r--   arch/powerpc/kernel/lparcfg.c       |  31
-rw-r--r--   arch/powerpc/kernel/pci_iommu.c     |  40
-rw-r--r--   arch/powerpc/kernel/ppc_ksyms.c     |   1
-rw-r--r--   arch/powerpc/kernel/prom.c          |   2
-rw-r--r--   arch/powerpc/kernel/prom_init.c     |   5
-rw-r--r--   arch/powerpc/kernel/rtas-proc.c     |   4
-rw-r--r--   arch/powerpc/kernel/rtas.c          |  12
-rw-r--r--   arch/powerpc/kernel/setup_32.c      |   6
-rw-r--r--   arch/powerpc/kernel/setup_64.c      |  10
-rw-r--r--   arch/powerpc/kernel/sysfs.c         |   4
-rw-r--r--   arch/powerpc/kernel/systbl.S        |   8
-rw-r--r--   arch/powerpc/kernel/traps.c         |   9
-rw-r--r--   arch/powerpc/kernel/vio.c           |   6
23 files changed, 236 insertions, 152 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0cc0995..803858e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_PPC64)	+= setup_64.o binfmt_elf32.o sys_ppc32.o \
 				   firmware.o sysfs.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
-obj-$(CONFIG_POWER4)		+= idle_power4.o
+obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o prom_parse.o
 procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 54b48f3..8f85c5e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,6 +91,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b3a9794..8866fd2 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -128,37 +128,36 @@ transfer_to_handler:
 	stw	r12,4(r11)
 #endif
 	b	3f
+
 2:	/* if from kernel, check interrupted DOZE/NAP mode and
 	 * check for stack overflow
 	 */
+	lwz	r9,THREAD_INFO-THREAD(r12)
+	cmplw	r1,r9			/* if r1 <= current->thread_info */
+	ble-	stack_ovf		/* then the kernel stack overflowed */
+5:
 #ifdef CONFIG_6xx
-	mfspr	r11,SPRN_HID0
-	mtcr	r11
-BEGIN_FTR_SECTION
-	bt-	8,4f			/* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-	bt-	9,4f			/* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+	tophys(r9,r9)			/* check local flags */
+	lwz	r12,TI_LOCAL_FLAGS(r9)
+	mtcrf	0x01,r12
+	bt-	31-TLF_NAPPING,4f
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
 transfer_to_handler_cont:
-	lwz	r11,THREAD_INFO-THREAD(r12)
-	cmplw	r1,r11			/* if r1 <= current->thread_info */
-	ble-	stack_ovf		/* then the kernel stack overflowed */
 3:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
-	FIX_SRR1(r10,r12)
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
-#ifdef CONFIG_6xx
-4:	b	power_save_6xx_restore
+#ifdef CONFIG_6xx
+4:	rlwinm	r12,r12,0,~_TLF_NAPPING
+	stw	r12,TI_LOCAL_FLAGS(r9)
+	b	power_save_6xx_restore
 #endif
 
 /*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
  */
 stack_ovf:
 	/* sometimes we use a statically-allocated stack, which is OK. */
-	lis	r11,_end@h
-	ori	r11,r11,_end@l
-	cmplw	r1,r11
-	ble	3b			/* r1 <= &_end is OK */
+	lis	r12,_end@h
+	ori	r12,r12,_end@l
+	cmplw	r1,r12
+	ble	5b			/* r1 <= &_end is OK */
 	SAVE_NVGPRS(r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lis	r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a5ae04a..b7d1404 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -376,11 +376,28 @@ label##_common:				\
 	bl	hdlr;				\
 	b	.ret_from_except
 
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling.
+ */
+#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)	\
+	.align	7;				\
+	.globl label##_common;			\
+label##_common:					\
+	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
+	FINISH_NAP;				\
+	DISABLE_INTS;				\
+	bl	.save_nvgprs;			\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
+	bl	hdlr;				\
+	b	.ret_from_except
+
 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
 	.align	7;				\
 	.globl label##_common;			\
 label##_common:					\
 	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
+	FINISH_NAP;				\
 	DISABLE_INTS;				\
 	bl	.ppc64_runlatch_on;		\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
@@ -388,6 +405,25 @@ label##_common:				\
 	b	.ret_from_except_lite
 
 /*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP				\
+BEGIN_FTR_SECTION				\
+	clrrdi	r11,r1,THREAD_SHIFT;		\
+	ld	r9,TI_LOCAL_FLAGS(r11);		\
+	andi.	r10,r9,_TLF_NAPPING;		\
+	bnel	power4_fixup_nap;		\
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+/*
  * Start of pSeries system interrupt routines
  */
 	. = 0x100
@@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked:
 	.globl machine_check_common
 machine_check_common:
 	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	FINISH_NAP
 	DISABLE_INTS
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -783,7 +820,7 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
 	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
 	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
 #ifdef CONFIG_ALTIVEC
 	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -1034,6 +1071,7 @@ unrecov_slb:
 	.globl hardware_interrupt_entry
 hardware_interrupt_common:
 	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+	FINISH_NAP
 hardware_interrupt_entry:
 	DISABLE_INTS
 	bl	.ppc64_runlatch_on
@@ -1041,6 +1079,15 @@ hardware_interrupt_entry:
 	bl	.do_IRQ
 	b	.ret_from_except_lite
 
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+	andc	r9,r9,r10
+	std	r9,TI_LOCAL_FLAGS(r11)
+	ld	r10,_LINK(r1)		/* make idle task do the */
+	std	r10,_NIP(r1)		/* equivalent of a blr */
+	blr
+#endif
+
 	.align	7
 	.globl alignment_common
 alignment_common:
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index e9f321d..d491052 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -50,9 +50,9 @@ void cpu_idle(void)
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		ppc64_runlatch_off();
-
 		while (!need_resched() && !cpu_should_die()) {
+			ppc64_runlatch_off();
+
 			if (ppc_md.power_save) {
 				clear_thread_flag(TIF_POLLING_NRFLAG);
 				/*
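A note on the nap protocol introduced above: the idle code now sits in a loop instead of doing a blr after entering nap, and relies on the exception entry path (FINISH_NAP / power4_fixup_nap) to eject it by clearing _TLF_NAPPING and redirecting the saved NIP to the saved LR. Below is a minimal C sketch of that handshake; the names mirror the patch, but the MSR/exception machinery is reduced to plain variables, so this is an illustration rather than kernel code.

#include <stdbool.h>

#define _TLF_NAPPING	0x1		/* bit in thread_info->local_flags */

struct thread_info { unsigned long local_flags; };

/* idle side: advertise that we are napping, then sleep in a loop;
 * only the exception path below can break us out */
static void nap_loop(struct thread_info *ti, volatile bool *wakeup)
{
	ti->local_flags |= _TLF_NAPPING;
	while (!*wakeup)
		;	/* stands in for the 1: sync / mtmsrd(POW) / isync loop */
}

/* exception-entry side (FINISH_NAP / power4_fixup_nap): if the
 * interrupted task was napping, clear the flag and point the saved
 * NIP at the saved LR, so the idle task "returns" to its caller */
static void finish_nap(struct thread_info *ti, unsigned long *saved_nip,
		       unsigned long saved_lr)
{
	if (ti->local_flags & _TLF_NAPPING) {
		ti->local_flags &= ~_TLF_NAPPING;
		*saved_nip = saved_lr;	/* the "equivalent of a blr" */
	}
}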
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 12a4efb..b45fa0e 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -22,8 +22,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
-#undef DEBUG
-
 	.text
 
 /*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
 	dcbf	0,r4
 	dcbf	0,r4
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-	lis	r6,nap_enter_count@ha
-	lwz	r4,nap_enter_count@l(r6)
-	addi	r4,r4,1
-	stw	r4,nap_enter_count@l(r6)
-#endif
 2:
 BEGIN_FTR_SECTION
 	/* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	rlwinm	r9,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
+	lwz	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
+	stw	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
-	sync
-	isync
+1:	sync
 	mtmsr	r7
 	isync
-	sync
-	blr
-
+	b	1b
+
 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
  * we are called with DR/IR still off and r2 containing physical
- * address of current.
+ * address of current.  R11 points to the exception frame (physical
+ * address).  We have to preserve r10.
  */
 _GLOBAL(power_save_6xx_restore)
-	mfspr	r11,SPRN_HID0
-	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
-	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	mtspr	SPRN_HID0, r11
+	lwz	r9,_LINK(r11)	/* interrupted in ppc6xx_idle: */
+	stw	r9,_NIP(r11)	/* make it do a blr */
 
-#ifdef DEBUG
-	beq	cr1,1f
-	lis	r11,(nap_return_count-KERNELBASE)@ha
-	lwz	r9,nap_return_count@l(r11)
-	addi	r9,r9,1
-	stw	r9,nap_return_count@l(r11)
-1:
-#endif
-
-	rlwinm	r9,r1,0,0,18
-	tophys(r9,r9)
-	lwz	r11,TI_CPU(r9)
+#ifdef CONFIG_SMP
+	mfspr	r12,SPRN_SPRG3
+	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
 	slwi	r11,r11,2
+#else
+	li	r11,0
+#endif
 	/* Todo make sure all these are in the same page
-	 * and load r22 (@ha part + CPU offset) only once
+	 * and load r11 (@ha part + CPU offset) only once
 	 */
 BEGIN_FTR_SECTION
-	beq	cr1,1f
+	mfspr	r9,SPRN_HID0
+	andis.	r9,r9,HID0_NAP@h
+	beq	1f
 	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
 	lwz	r9,nap_save_msscr0@l(r9)
 	mtspr	SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)
 
 _GLOBAL(powersave_lowspeed)
 	.long	0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-	.space	4
-_GLOBAL(nap_return_count)
-	.space	4
-#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 6dad1c0..d85c7c9 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -35,12 +35,16 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	clrrdi	r9,r1,THREAD_SHIFT	/* current thread_info */
+	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
+	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
-	sync
+1:	sync
 	isync
 	mtmsrd	r7
 	isync
-	sync
-	blr
+	b	1b
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d9a7fde..4eba60a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -61,6 +61,7 @@ __setup("iommu=", setup_iommu);
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 				       unsigned long npages,
 				       unsigned long *handle,
+				       unsigned long mask,
 				       unsigned int align_order)
 {
 	unsigned long n, end, i, start;
@@ -97,9 +98,21 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	 */
 	if (start >= limit)
 		start = largealloc ? tbl->it_largehint : tbl->it_hint;
-
+
 again:
 
+	if (limit + tbl->it_offset > mask) {
+		limit = mask - tbl->it_offset + 1;
+		/* If we're constrained on address range, first try
+		 * at the masked hint to avoid O(n) search complexity,
+		 * but on second pass, start at 0.
+		 */
+		if ((start & mask) >= limit || pass > 0)
+			start = 0;
+		else
+			start &= mask;
+	}
+
 	n = find_next_zero_bit(tbl->it_map, limit, start);
 
 	/* Align allocation */
@@ -150,14 +163,14 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 		       unsigned int npages, enum dma_data_direction direction,
-		       unsigned int align_order)
+		       unsigned long mask, unsigned int align_order)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
-
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
 
 	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -236,7 +249,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction)
+		unsigned long mask, enum dma_data_direction direction)
 {
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
@@ -274,7 +287,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		vaddr = (unsigned long)page_address(s->page) + s->offset;
 		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
 		npages >>= PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle, 0);
+		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -479,7 +492,8 @@ void iommu_free_table(struct device_node *dn)
  * byte within the page as vaddr.
  */
 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-		size_t size, enum dma_data_direction direction)
+		size_t size, unsigned long mask,
+		enum dma_data_direction direction)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
@@ -492,7 +506,8 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages >>= PAGE_SHIFT;
 
 	if (tbl) {
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+					 mask >> PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
 				printk(KERN_INFO "iommu_alloc failed, "
@@ -521,7 +536,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag)
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -551,7 +566,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
+			      mask >> PAGE_SHIFT, order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		ret = NULL;
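The mask parameter threaded through iommu_range_alloc() above clamps the bitmap search so that no allocated entry maps a bus address beyond the device's DMA mask. A standalone sketch of just the clamping arithmetic; the names come from the patch, but the table geometry in main() is made up:

#include <stdio.h>

/* entry n of the table maps DMA page it_offset + n, so with a device
 * mask of `mask` (already shifted right by PAGE_SHIFT) the highest
 * usable entry index is mask - it_offset */
static unsigned long clamp_limit(unsigned long limit,     /* entries in table */
				 unsigned long it_offset, /* first DMA page */
				 unsigned long mask)      /* dma_mask >> PAGE_SHIFT */
{
	if (limit + it_offset > mask)
		limit = mask - it_offset + 1;
	return limit;
}

int main(void)
{
	/* hypothetical: 2M-entry table starting at DMA page 0, device
	 * limited to 32-bit addresses with 4K pages (mask 0xfffff) */
	printf("%lx\n", clamp_limit(0x200000, 0, 0xfffff));	/* 0x100000 */
	return 0;
}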
+ */ + if ((start & mask) >= limit || pass > 0) + start = 0; + else + start &= mask; + } + n = find_next_zero_bit(tbl->it_map, limit, start); /* Align allocation */ @@ -150,14 +163,14 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, - unsigned int align_order) + unsigned long mask, unsigned int align_order) { unsigned long entry, flags; dma_addr_t ret = DMA_ERROR_CODE; - + spin_lock_irqsave(&(tbl->it_lock), flags); - entry = iommu_range_alloc(tbl, npages, NULL, align_order); + entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order); if (unlikely(entry == DMA_ERROR_CODE)) { spin_unlock_irqrestore(&(tbl->it_lock), flags); @@ -236,7 +249,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, int iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) + unsigned long mask, enum dma_data_direction direction) { dma_addr_t dma_next = 0, dma_addr; unsigned long flags; @@ -274,7 +287,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, vaddr = (unsigned long)page_address(s->page) + s->offset; npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK); npages >>= PAGE_SHIFT; - entry = iommu_range_alloc(tbl, npages, &handle, 0); + entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0); DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); @@ -479,7 +492,8 @@ void iommu_free_table(struct device_node *dn) * byte within the page as vaddr. */ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, - size_t size, enum dma_data_direction direction) + size_t size, unsigned long mask, + enum dma_data_direction direction) { dma_addr_t dma_handle = DMA_ERROR_CODE; unsigned long uaddr; @@ -492,7 +506,8 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, npages >>= PAGE_SHIFT; if (tbl) { - dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0); + dma_handle = iommu_alloc(tbl, vaddr, npages, direction, + mask >> PAGE_SHIFT, 0); if (dma_handle == DMA_ERROR_CODE) { if (printk_ratelimit()) { printk(KERN_INFO "iommu_alloc failed, " @@ -521,7 +536,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, * to the dma address (mapping) of the first page. */ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, unsigned long mask, gfp_t flag) { void *ret = NULL; dma_addr_t mapping; @@ -551,7 +566,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order); + mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, + mask >> PAGE_SHIFT, order); if (mapping == DMA_ERROR_CODE) { free_pages((unsigned long)ret, order); ret = NULL; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index bb5c950..57d560c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -272,18 +272,26 @@ unsigned int virt_irq_to_real_map[NR_IRQS]; * Don't use virtual irqs 0, 1, 2 for devices. * The pcnet32 driver considers interrupt numbers < 2 to be invalid, * and 2 is the XICS IPI interrupt. - * We limit virtual irqs to 17 less than NR_IRQS so that when we - * offset them by 16 (to reserve the first 16 for ISA interrupts) - * we don't end up with an interrupt number >= NR_IRQS. 
+ * We limit virtual irqs to __irq_offet_value less than virt_irq_max so + * that when we offset them we don't end up with an interrupt + * number >= virt_irq_max. */ #define MIN_VIRT_IRQ 3 -#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1) -#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1) + +unsigned int virt_irq_max; +static unsigned int max_virt_irq; +static unsigned int nr_virt_irqs; void virt_irq_init(void) { int i; + + if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1))) + virt_irq_max = NR_IRQS - 1; + max_virt_irq = virt_irq_max - __irq_offset_value; + nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1; + for (i = 0; i < NR_IRQS; i++) virt_irq_to_real_map[i] = UNDEFINED_IRQ; } @@ -308,17 +316,17 @@ int virt_irq_create_mapping(unsigned int real_irq) return real_irq; } - /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */ + /* map to a number between MIN_VIRT_IRQ and max_virt_irq */ virq = real_irq; - if (virq > MAX_VIRT_IRQ) - virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; + if (virq > max_virt_irq) + virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; /* search for this number or a free slot */ first_virq = virq; while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { if (virt_irq_to_real_map[virq] == real_irq) return virq; - if (++virq > MAX_VIRT_IRQ) + if (++virq > max_virt_irq) virq = MIN_VIRT_IRQ; if (virq == first_virq) goto nospace; /* oops, no free slots */ @@ -330,8 +338,8 @@ int virt_irq_create_mapping(unsigned int real_irq) nospace: if (!warned) { printk(KERN_CRIT "Interrupt table is full\n"); - printk(KERN_CRIT "Increase NR_IRQS (currently %d) " - "in your kernel sources and rebuild.\n", NR_IRQS); + printk(KERN_CRIT "Increase virt_irq_max (currently %d) " + "in your kernel sources and rebuild.\n", virt_irq_max); warned = 1; } return NO_IRQ; @@ -349,8 +357,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) virq = real_irq; - if (virq > MAX_VIRT_IRQ) - virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; + if (virq > max_virt_irq) + virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; first_virq = virq; @@ -360,7 +368,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) virq++; - if (virq >= MAX_VIRT_IRQ) + if (virq >= max_virt_irq) virq = 0; } while (first_virq != virq); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index ad7a902..856ef1a 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -88,7 +88,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p) mutex_unlock(&kprobe_mutex); } -static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { kprobe_opcode_t insn = *p->ainsn.insn; @@ -101,21 +101,21 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) regs->nip = (unsigned long)p->ainsn.insn; } -static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; } -static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; } -static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, +static void 
__kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; @@ -141,7 +141,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, } } -static inline int kprobe_handler(struct pt_regs *regs) +static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; @@ -334,7 +334,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) regs->nip = (unsigned long)p->addr + 4; } -static inline int post_kprobe_handler(struct pt_regs *regs) +static int __kprobes post_kprobe_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -370,7 +370,7 @@ out: return 1; } -static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1b73508..2cbde86 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -37,7 +37,7 @@ #include <asm/prom.h> #include <asm/vdso_datapage.h> -#define MODULE_VERS "1.6" +#define MODULE_VERS "1.7" #define MODULE_NAME "lparcfg" /* #define LPARCFG_DEBUG */ @@ -149,17 +149,17 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag) if (rc == 0) /* success, return */ return; /* check for null tag ? */ - if (rc == H_Hardware) + if (rc == H_HARDWARE) printk(KERN_INFO "plpar-hcall (%s) failed with hardware fault\n", tag); - else if (rc == H_Function) + else if (rc == H_FUNCTION) printk(KERN_INFO "plpar-hcall (%s) failed; function not allowed\n", tag); - else if (rc == H_Authority) + else if (rc == H_AUTHORITY) printk(KERN_INFO - "plpar-hcall (%s) failed; not authorized to this function\n", - tag); - else if (rc == H_Parameter) + "plpar-hcall (%s) failed; not authorized to this" + " function\n", tag); + else if (rc == H_PARAMETER) printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n", tag); else @@ -209,7 +209,7 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) unsigned long dummy; rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy); - if (rc != H_Authority) + if (rc != H_AUTHORITY) log_plpar_hcall_return(rc, "H_PIC"); } @@ -242,7 +242,7 @@ static void parse_system_parameter_string(struct seq_file *m) { int call_status; - char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); + unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); if (!local_buffer) { printk(KERN_ERR "%s %s kmalloc failure at line %d \n", __FILE__, __FUNCTION__, __LINE__); @@ -254,7 +254,8 @@ static void parse_system_parameter_string(struct seq_file *m) call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf)); + __pa(rtas_data_buf), + RTAS_DATA_BUF_SIZE); memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); spin_unlock(&rtas_data_buf_lock); @@ -275,7 +276,7 @@ static void parse_system_parameter_string(struct seq_file *m) #ifdef LPARCFG_DEBUG printk(KERN_INFO "success calling get-system-parameter \n"); #endif - splpar_strlen = local_buffer[0] * 16 + local_buffer[1]; + splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; local_buffer += 2; /* step over strlen value */ memset(workbuffer, 0, SPLPAR_MAXLENGTH); @@ -529,13 +530,13 @@ static ssize_t lparcfg_write(struct file *file, const char 
__user * buf, retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, *new_weight_ptr); - if (retval == H_Success || retval == H_Constrained) { + if (retval == H_SUCCESS || retval == H_CONSTRAINED) { retval = count; - } else if (retval == H_Busy) { + } else if (retval == H_BUSY) { retval = -EBUSY; - } else if (retval == H_Hardware) { + } else if (retval == H_HARDWARE) { retval = -EIO; - } else if (retval == H_Parameter) { + } else if (retval == H_PARAMETER) { retval = -EINVAL; } else { printk(KERN_WARNING "%s: received unknown hv return code %ld", diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c index c336f3e..c1d95e1 100644 --- a/arch/powerpc/kernel/pci_iommu.c +++ b/arch/powerpc/kernel/pci_iommu.c @@ -59,6 +59,25 @@ static inline struct iommu_table *devnode_table(struct device *dev) } +static inline unsigned long device_to_mask(struct device *hwdev) +{ + struct pci_dev *pdev; + + if (!hwdev) { + pdev = ppc64_isabridge_dev; + if (!pdev) /* This is the best guess we can do */ + return 0xfffffffful; + } else + pdev = to_pci_dev(hwdev); + + if (pdev->dma_mask) + return pdev->dma_mask; + + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} + + /* Allocates a contiguous real buffer and creates mappings over it. * Returns the virtual address of the buffer and sets dma_handle * to the dma address (mapping) of the first page. @@ -67,7 +86,7 @@ static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, - flag); + device_to_mask(hwdev), flag); } static void pci_iommu_free_coherent(struct device *hwdev, size_t size, @@ -85,7 +104,8 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size, static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr, size_t size, enum dma_data_direction direction) { - return iommu_map_single(devnode_table(hwdev), vaddr, size, direction); + return iommu_map_single(devnode_table(hwdev), vaddr, size, + device_to_mask(hwdev), direction); } @@ -100,7 +120,7 @@ static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { return iommu_map_sg(pdev, devnode_table(pdev), sglist, - nelems, direction); + nelems, device_to_mask(pdev), direction); } static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, @@ -112,7 +132,19 @@ static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, /* We support DMA to/from any memory page via the iommu */ static int pci_iommu_dma_supported(struct device *dev, u64 mask) { - return 1; + struct iommu_table *tbl = devnode_table(dev); + + if (!tbl || tbl->it_offset > mask) { + printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n"); + if (tbl) + printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n", + mask, tbl->it_offset); + else + printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", + mask); + return 0; + } else + return 1; } void pci_iommu_init(void) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index dfa5398..4b052ae 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -81,6 +81,7 @@ EXPORT_SYMBOL(strcat); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strcasecmp); +EXPORT_SYMBOL(strncasecmp); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); diff --git a/arch/powerpc/kernel/prom.c 
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index dfa5398..4b052ae 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,6 +81,7 @@ EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strcasecmp);
+EXPORT_SYMBOL(strncasecmp);
 
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4336390..1cb69e8 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -62,7 +62,7 @@ static int __initdata dt_root_addr_cells;
 static int __initdata dt_root_size_cells;
 
 #ifdef CONFIG_PPC64
-static int __initdata iommu_is_off;
+int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
 #endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d66c5e7..7e4d548 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1528,12 +1528,11 @@ static int __init prom_find_machine_type(void)
 	 * non-IBM designs !
 	 * - it has /rtas
 	 */
-	len = prom_getprop(_prom->root, "model",
+	len = prom_getprop(_prom->root, "device_type",
 			   compat, sizeof(compat)-1);
 	if (len <= 0)
 		return PLATFORM_GENERIC;
-	compat[len] = 0;
-	if (strcmp(compat, "chrp"))
+	if (strncmp(compat, RELOC("chrp"), 4))
 		return PLATFORM_GENERIC;
 
 	/* Default to pSeries. We need to know if we are running LPAR */
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 456286c..9c9ad1f 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -258,11 +258,11 @@ static int __init proc_rtas_init(void)
 	struct proc_dir_entry *entry;
 
 	if (!machine_is(pseries))
-		return 1;
+		return -ENODEV;
 
 	rtas_node = of_find_node_by_name(NULL, "rtas");
 	if (rtas_node == NULL)
-		return 1;
+		return -ENODEV;
 
 	entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
 	if (entry)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 06636c9..0112318 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -578,18 +578,18 @@ static void rtas_percpu_suspend_me(void *info)
 	 * We use "waiting" to indicate our state.  As long
 	 * as it is >0, we are still trying to all join up.
 	 * If it goes to 0, we have successfully joined up and
-	 * one thread got H_Continue.  If any error happens,
+	 * one thread got H_CONTINUE.  If any error happens,
 	 * we set it to <0.
 	 */
 	local_irq_save(flags);
 	do {
 		rc = plpar_hcall_norets(H_JOIN);
 		smp_rmb();
-	} while (rc == H_Success && data->waiting > 0);
-	if (rc == H_Success)
+	} while (rc == H_SUCCESS && data->waiting > 0);
+	if (rc == H_SUCCESS)
 		goto out;
 
-	if (rc == H_Continue) {
+	if (rc == H_CONTINUE) {
 		data->waiting = 0;
 		data->args->args[data->args->nargs] =
 			rtas_call(ibm_suspend_me_token, 0, 1, NULL);
@@ -597,7 +597,7 @@ static void rtas_percpu_suspend_me(void *info)
 			plpar_hcall_norets(H_PROD,i);
 	} else {
 		data->waiting = -EBUSY;
-		printk(KERN_ERR "Error on H_Join hypervisor call\n");
+		printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
 	}
 
 out:
@@ -624,7 +624,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 		printk(KERN_ERR "Error doing global join\n");
 
 	/* Prod each CPU.  This won't hurt, and will wake
-	 * anyone we successfully put to sleep with H_Join
+	 * anyone we successfully put to sleep with H_JOIN.
	 */
	for_each_possible_cpu(i)
		plpar_hcall_norets(H_PROD, i);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a72bf5d..69ac257 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -50,7 +50,6 @@
 #include <asm/kgdb.h>
 #endif
 
-extern void platform_init(void);
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
 boot_infos_t *boot_infos;
@@ -138,12 +137,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 		strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
 #endif /* CONFIG_CMDLINE */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 	probe_machine();
-#else
-	/* Base init based on machine type. Obsoloete, please kill ! */
-	platform_init();
-#endif
 
 #ifdef CONFIG_6xx
 	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 59aa92c..13e91c4 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -215,12 +215,10 @@ void __init early_setup(unsigned long dt_ptr)
 	/*
 	 * Initialize stab / SLB management except on iSeries
 	 */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
-		if (cpu_has_feature(CPU_FTR_SLB))
-			slb_initialize();
-		else
-			stab_initialize(get_paca()->stab_real);
-	}
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		stab_initialize(get_paca()->stab_real);
 
 	DBG(" <- early_setup()\n");
 }
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 73560ef..ed737ca 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
 				      unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
@@ -297,7 +297,7 @@ static int __devinit sysfs_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata sysfs_cpu_nb = {
+static struct notifier_block sysfs_cpu_nb = {
 	.notifier_call	= sysfs_cpu_notify,
 };
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 1ad55f0..0b98eea 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -322,3 +322,11 @@ SYSCALL(spu_create)
 COMPAT_SYS(pselect6)
 COMPAT_SYS(ppoll)
 SYSCALL(unshare)
+SYSCALL(splice)
+SYSCALL(tee)
+SYSCALL(vmsplice)
+
+/*
+ * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c
+ * as well when appropriate.
+ */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4cbde21..064a525 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -228,7 +228,7 @@ void system_reset_exception(struct pt_regs *regs)
  */
 static inline int check_io_access(struct pt_regs *regs)
 {
-#ifdef CONFIG_PPC_PMAC
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
 	unsigned long msr = regs->msr;
 	const struct exception_table_entry *entry;
 	unsigned int *nip = (unsigned int *)regs->nip;
@@ -261,7 +261,7 @@ static inline int check_io_access(struct pt_regs *regs)
 			return 1;
 		}
 	}
-#endif /* CONFIG_PPC_PMAC */
+#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
 
 	return 0;
 }
@@ -308,8 +308,8 @@ platform_machine_check(struct pt_regs *regs)
 
 void machine_check_exception(struct pt_regs *regs)
 {
-#ifdef CONFIG_PPC64
 	int recover = 0;
+	unsigned long reason = get_mc_reason(regs);
 
 	/* See if any machine dependent calls */
 	if (ppc_md.machine_check_exception)
@@ -317,8 +317,6 @@ void machine_check_exception(struct pt_regs *regs)
 
 	if (recover)
 		return;
-#else
-	unsigned long reason = get_mc_reason(regs);
 
 	if (user_mode(regs)) {
 		regs->msr |= MSR_RI;
@@ -462,7 +460,6 @@ void machine_check_exception(struct pt_regs *regs)
 	 * additional info, e.g. bus error registers.
 	 */
 	platform_machine_check(regs);
-#endif /* CONFIG_PPC64 */
 
 	if (debugger_fault_handler(regs))
 		return;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 13c655b..971020c 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -202,7 +202,7 @@ static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
 			  size_t size, enum dma_data_direction direction)
 {
 	return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
-			direction);
+			~0ul, direction);
 }
 
 static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
@@ -216,7 +216,7 @@ static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
 		      int nelems, enum dma_data_direction direction)
 {
 	return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
-			nelems, direction);
+			nelems, ~0ul, direction);
 }
 
 static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -229,7 +229,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
-			dma_handle, flag);
+			dma_handle, ~0ul, flag);
 }
 
 static void vio_free_coherent(struct device *dev, size_t size,