From 9747dd6fa98f2983f4dd09cd6dad1fa3d2a4c5f2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 30 Apr 2005 10:01:40 -0700 Subject: [PATCH] ppc64: fix 32-bit signal frame back link When the kernel creates a signal frame on the user stack, it puts the old stack pointer value at the beginning so that the signal frame is linked into the chain of stack frames like any other frame. Unfortunately, for 32-bit processes we are writing the old stack pointer as a 64-bit value rather than a 32-bit value, and the process sees that as a null pointer, since it only looks at the first 32 bits, which are zero since ppc is bigendian and the stack pointer is below 4GB. This bug is in SLES9 and RHEL4 too, hence the ccs. This patch fixes the bug by making the signal code write the old stack pointer as a u32 instead of an unsigned long. Signed-off-by: Paul Mackerras Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/signal32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c index b0e167d..3c2fa5c 100644 --- a/arch/ppc64/kernel/signal32.c +++ b/arch/ppc64/kernel/signal32.c @@ -657,7 +657,7 @@ static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; - if (put_user(regs->gpr[1], (unsigned long __user *)newsp)) + if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; if (vdso32_rt_sigtramp && current->thread.vdso_base) { @@ -842,7 +842,7 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka, regs->link = (unsigned long) frame->mctx.tramp; } - if (put_user(regs->gpr[1], (unsigned long __user *)newsp)) + if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; regs->gpr[1] = (unsigned long) newsp; regs->gpr[3] = sig; -- cgit v1.1 From 1b29f9d13e3cf0fe86bf7f82a3399c9e3caf58e5 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 1 May 2005 08:58:43 -0700 Subject: [PATCH] ppc64: add PT_NOTE section to vDSO This patch from Roland adds a PT_NOTE section to both 32 and 64 bits vDSOs to expose the kernel version to glibc, thus avoiding a uname syscall on every launch. This is equivalent to the patches Roland posted already for x86 and x86-64. Note: the 64 bits .note is actually using the 32 bits format. This is normal. The ELF spec specifies a different format for 64 bits .note, but for some reason, this was never properly implemented, the core dumps for example are all using 32 bits format .note, and binutils cannot even read a 64 bits format .note. Talking to our toolchain folks, they think we'd rather stick to 32 bits format .note everywhere and get the spec fixed some day ... 
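For readers unfamiliar with the 32-bit note format being reused here, the record emitted by .note.kernel-version is the standard namesz/descsz/type triple followed by the padded vendor string and the descriptor words. The sketch below is illustrative only: the struct, the find_kernel_version() helper and its parameters are invented for this note (glibc's real vDSO parser is more general), but the layout and the "Linux"/LINUX_VERSION_CODE payload follow the assembly in this patch.

    #include <stdint.h>
    #include <string.h>

    /* Layout of the kernel-version note as assembled above; every field is
     * 4-byte aligned, and the same 32-bit format is reused for the 64-bit
     * vDSO, as the changelog explains.
     */
    struct kver_note {
            uint32_t namesz;        /* strlen("Linux") + 1 = 6 */
            uint32_t descsz;        /* 4: one word of descriptor */
            uint32_t type;          /* 0 for this note */
            char     name[8];       /* "Linux", NUL-padded to a 4-byte boundary */
            uint32_t desc;          /* LINUX_VERSION_CODE */
    };

    /* Hypothetical consumer: scan a PT_NOTE segment for the note so the
     * kernel version can be read without a uname() syscall.
     */
    static int find_kernel_version(const unsigned char *p, size_t len,
                                   uint32_t *version)
    {
            while (len >= 12) {
                    uint32_t namesz, descsz, type;
                    size_t name_pad, total;

                    memcpy(&namesz, p, 4);
                    memcpy(&descsz, p + 4, 4);
                    memcpy(&type, p + 8, 4);
                    name_pad = (namesz + 3) & ~(size_t)3;
                    total = 12 + name_pad + ((descsz + 3) & ~(size_t)3);
                    if (total > len)
                            break;
                    if (type == 0 && namesz == 6 && descsz == 4 &&
                        memcmp(p + 12, "Linux", 6) == 0) {
                            memcpy(version, p + 12 + name_pad, 4);
                            return 0;
                    }
                    p += total;
                    len -= total;
            }
            return -1;      /* note not found */
    }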
Signed-off-by: Roland McGrath Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/vdso32/Makefile | 2 +- arch/ppc64/kernel/vdso32/note.S | 25 +++++++++++++++++++++++++ arch/ppc64/kernel/vdso32/vdso32.lds.S | 3 +++ arch/ppc64/kernel/vdso64/Makefile | 2 +- arch/ppc64/kernel/vdso64/note.S | 1 + arch/ppc64/kernel/vdso64/vdso64.lds.S | 5 ++++- 6 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 arch/ppc64/kernel/vdso32/note.S create mode 100644 arch/ppc64/kernel/vdso64/note.S (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/vdso32/Makefile b/arch/ppc64/kernel/vdso32/Makefile index ede2f7e..0b1b0df 100644 --- a/arch/ppc64/kernel/vdso32/Makefile +++ b/arch/ppc64/kernel/vdso32/Makefile @@ -1,7 +1,7 @@ # List of files in the vdso, has to be asm only for now -obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o +obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o # Build rules diff --git a/arch/ppc64/kernel/vdso32/note.S b/arch/ppc64/kernel/vdso32/note.S new file mode 100644 index 0000000..d4b5be4 --- /dev/null +++ b/arch/ppc64/kernel/vdso32/note.S @@ -0,0 +1,25 @@ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include + +#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ + .section name, flags; \ + .balign 4; \ + .long 1f - 0f; /* name length */ \ + .long 3f - 2f; /* data length */ \ + .long type; /* note type */ \ +0: .asciz vendor; /* vendor name */ \ +1: .balign 4; \ +2: + +#define ASM_ELF_NOTE_END \ +3: .balign 4; /* pad out section */ \ + .previous + + ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) + .long LINUX_VERSION_CODE + ASM_ELF_NOTE_END diff --git a/arch/ppc64/kernel/vdso32/vdso32.lds.S b/arch/ppc64/kernel/vdso32/vdso32.lds.S index cca27bd..11290c9 100644 --- a/arch/ppc64/kernel/vdso32/vdso32.lds.S +++ b/arch/ppc64/kernel/vdso32/vdso32.lds.S @@ -20,6 +20,8 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + .note : { *(.note.*) } :text :note + . = ALIGN (16); .text : { @@ -87,6 +89,7 @@ SECTIONS PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ } diff --git a/arch/ppc64/kernel/vdso64/Makefile b/arch/ppc64/kernel/vdso64/Makefile index bd3f70b..ab39988 100644 --- a/arch/ppc64/kernel/vdso64/Makefile +++ b/arch/ppc64/kernel/vdso64/Makefile @@ -1,6 +1,6 @@ # List of files in the vdso, has to be asm only for now -obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o +obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o # Build rules diff --git a/arch/ppc64/kernel/vdso64/note.S b/arch/ppc64/kernel/vdso64/note.S new file mode 100644 index 0000000..dc2a509 --- /dev/null +++ b/arch/ppc64/kernel/vdso64/note.S @@ -0,0 +1 @@ +#include "../vdso32/note.S" diff --git a/arch/ppc64/kernel/vdso64/vdso64.lds.S b/arch/ppc64/kernel/vdso64/vdso64.lds.S index 942c815..9cb2818 100644 --- a/arch/ppc64/kernel/vdso64/vdso64.lds.S +++ b/arch/ppc64/kernel/vdso64/vdso64.lds.S @@ -18,12 +18,14 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + .note : { *(.note.*) } :text :note + . 
= ALIGN (16); .text : { *(.text .stub .text.* .gnu.linkonce.t.*) *(.sfpr .glink) - } + } :text PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); @@ -88,6 +90,7 @@ SECTIONS PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + note PT_NOTE FLAGS(4); /* PF_R */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ } -- cgit v1.1 From bb78cb72201985ae9269b723c82ea0f892048b9e Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Sun, 1 May 2005 08:58:44 -0700 Subject: [PATCH] ppc64: remove unused argument to create_slbe Remove vsid argument to create_slbe, since it's no longer used. Spotted by R Sharada. Signed-off-by: Olof Johansson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/mm/slb.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c index 6a20773..244150a 100644 --- a/arch/ppc64/mm/slb.c +++ b/arch/ppc64/mm/slb.c @@ -33,8 +33,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags) return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; } -static inline void create_slbe(unsigned long ea, unsigned long vsid, - unsigned long flags, unsigned long entry) +static inline void create_slbe(unsigned long ea, unsigned long flags, + unsigned long entry) { asm volatile("slbmte %0,%1" : : "r" (mk_vsid_data(ea, flags)), @@ -145,9 +145,8 @@ void slb_initialize(void) asm volatile("isync":::"memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("isync; slbia; isync":::"memory"); - create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0); - create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE), - SLB_VSID_KERNEL, 1); + create_slbe(KERNELBASE, flags, 0); + create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1); /* We don't bolt the stack for the time being - we're in boot, * so the stack is in the bolted segment. By the time it goes * elsewhere, we'll call _switch() which will bolt in the new -- cgit v1.1 From dc3ec7503e693e05c01d85b664482d0f694429ab Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sun, 1 May 2005 08:58:44 -0700 Subject: [PATCH] ppc64: Fix irq parsing on powermac When I tried Ben's patches to the powermac sound driver on my G5, I found that it was taking enormous numbers of sound DMA transmit interrupts. This turned out to be because it was incorrectly configured as level-sensitive instead of edge-sensitive, which in turn was because the code that parses the interrupt tree that Open Firmware gives us was incorrectly assigning another device the same irq number as the sound DMA transmit interrupt (i.e. 1). This patch fixes the problem, in a somewhat quick and dirty way for now, but one which will work for all the machines we currently run on. Ultimately Ben and I want to do something more general and robust, but this should go in for 2.6.12. 
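The hunk below is the quick fix described above. Restated in plain C purely for illustration (the helper name, signature and return convention here are made up and do not exist in the kernel), the parent controller's "name" property decides how a cascaded interrupt line is numbered:

    #include <string.h>

    /* Illustrative only: mirrors the logic added to finish_node_interrupts().
     * Lines whose parent is the "u3" host controller are offset by 128 so
     * they do not collide with mac-io interrupt numbers; mac-io lines are
     * used as-is; any other cascaded controller (e.g. k2-sata-root) is
     * skipped for now.
     */
    static int map_cascaded_line(const char *parent_name, unsigned int line,
                                 unsigned int *mapped)
    {
            if (parent_name && strcmp(parent_name, "u3") == 0) {
                    *mapped = line + 128;
                    return 0;
            }
            if (parent_name && strcmp(parent_name, "mac-io") == 0) {
                    *mapped = line;
                    return 0;
            }
            return -1;      /* unhandled cascade: ignore this interrupt */
    }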
Signed-off-by: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/prom.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c index 45a4ad0..fe2946c 100644 --- a/arch/ppc64/kernel/prom.c +++ b/arch/ppc64/kernel/prom.c @@ -321,6 +321,10 @@ static int __devinit finish_node_interrupts(struct device_node *np, char *name = get_property(ic->parent, "name", NULL); if (name && !strcmp(name, "u3")) np->intrs[intrcount].line += 128; + else if (!(name && !strcmp(name, "mac-io"))) + /* ignore other cascaded controllers, such as + the k2-sata-root */ + break; } np->intrs[intrcount].sense = 1; if (n > 1) -- cgit v1.1 From 0339ad77c4a06fa8529db17c91f790058e18b65b Mon Sep 17 00:00:00 2001 From: "akpm@osdl.org" Date: Sun, 1 May 2005 08:58:44 -0700 Subject: [PATCH] ppc64: nvram cleanups - Fix arch/ppc64/kernel/nvram.c:342: warning: `part' might be used uninitialized in this function - Various codingstyle tweaks. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/nvram.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c index b9069c2..4e71781 100644 --- a/arch/ppc64/kernel/nvram.c +++ b/arch/ppc64/kernel/nvram.c @@ -339,9 +339,9 @@ static int nvram_remove_os_partition(void) static int nvram_create_os_partition(void) { struct list_head * p; - struct nvram_partition * part; - struct nvram_partition * new_part = NULL; - struct nvram_partition * free_part = NULL; + struct nvram_partition *part = NULL; + struct nvram_partition *new_part = NULL; + struct nvram_partition *free_part = NULL; int seq_init[2] = { 0, 0 }; loff_t tmp_index; long size = 0; @@ -364,13 +364,11 @@ static int nvram_create_os_partition(void) free_part = part; } } - if (!size) { + if (!size) return -ENOSPC; - } /* Create our OS partition */ - new_part = (struct nvram_partition *) - kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); + new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); if (!new_part) { printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n"); return -ENOMEM; @@ -379,7 +377,7 @@ static int nvram_create_os_partition(void) new_part->index = free_part->index; new_part->header.signature = NVRAM_SIG_OS; new_part->header.length = size; - sprintf(new_part->header.name, "ppc64,linux"); + strcpy(new_part->header.name, "ppc64,linux"); new_part->header.checksum = nvram_checksum(&new_part->header); rc = nvram_write_header(new_part); @@ -394,7 +392,8 @@ static int nvram_create_os_partition(void) tmp_index = new_part->index + NVRAM_HEADER_LEN; rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); if (rc <= 0) { - printk(KERN_ERR "nvram_create_os_partition: nvram_write failed (%d)\n", rc); + printk(KERN_ERR "nvram_create_os_partition: nvram_write " + "failed (%d)\n", rc); return rc; } -- cgit v1.1 From 58366af5861eee1479426380e3c91ecb334c301d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 1 May 2005 08:58:44 -0700 Subject: [PATCH] ppc64: update to use the new 4L headers This patch converts ppc64 to use the generic pgtable-nopud.h instead of the "fixup" header. 
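The mechanical part of the conversion is visible in the hunks below: the pgd_* accessors used on entries that are really one level down become the corresponding pud_* accessors. For orientation, a generic four-level lookup with the calling convention of this era looks roughly like the sketch that follows; the function name is invented and error handling is reduced to NULL returns.

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Generic pgd -> pud -> pmd -> pte walk.  On an architecture that folds
     * the pud level away, pgtable-nopud.h makes pud_offset() return the pgd
     * entry itself and turns pud_none()/pud_bad() into constant no-ops.
     */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd;

            pgd = pgd_offset(mm, addr);
            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return NULL;
            pud = pud_offset(pgd, addr);
            if (pud_none(*pud) || pud_bad(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;
            return pte_offset_kernel(pmd, addr);
    }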
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/mm/hugetlbpage.c | 45 +++++----- arch/ppc64/mm/init.c | 198 ++++++++++++++++++++++---------------------- 2 files changed, 123 insertions(+), 120 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 390296e..d3bf86a 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr) return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; } -static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) +static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) { int index; @@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) index = hugepgd_index(addr); BUG_ON(index >= PTRS_PER_HUGEPGD); - return mm->context.huge_pgdir + index; + return (pud_t *)(mm->context.huge_pgdir + index); } -static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr) +static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) { int index; - if (pgd_none(*dir)) + if (pud_none(*dir)) return NULL; index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; - return (pte_t *)pgd_page(*dir) + index; + return (pte_t *)pud_page(*dir) + index; } -static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) +static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) { BUG_ON(! in_hugepage_area(mm->context, addr)); @@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) return hugepgd_offset(mm, addr); } -static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, - unsigned long addr) +static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) { - if (! pgd_present(*dir)) { + if (! pud_present(*dir)) { pte_t *new; spin_unlock(&mm->page_table_lock); @@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, * Because we dropped the lock, we should re-check the * entry, as somebody else could have populated it.. */ - if (pgd_present(*dir)) { + if (pud_present(*dir)) { if (new) kmem_cache_free(zero_cache, new); } else { @@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, ptepage = virt_to_page(new); ptepage->mapping = (void *) mm; ptepage->index = addr & HUGEPGDIR_MASK; - pgd_populate(mm, dir, new); + pud_populate(mm, dir, new); } } @@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { - pgd_t *pgd; + pud_t *pud; BUG_ON(! in_hugepage_area(mm->context, addr)); - pgd = hugepgd_offset(mm, addr); - if (! pgd) + pud = hugepgd_offset(mm, addr); + if (! pud) return NULL; - return hugepte_offset(pgd, addr); + return hugepte_offset(pud, addr); } static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { - pgd_t *pgd; + pud_t *pud; BUG_ON(! in_hugepage_area(mm->context, addr)); - pgd = hugepgd_alloc(mm, addr); - if (! pgd) + pud = hugepgd_alloc(mm, addr); + if (! pud) return NULL; - return hugepte_alloc(mm, pgd, addr); + return hugepte_alloc(mm, pud, addr); } static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, @@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) /* cleanup any hugepte pages leftover */ for (i = 0; i < PTRS_PER_HUGEPGD; i++) { - pgd_t *pgd = pgdir + i; + pud_t *pud = (pud_t *)(pgdir + i); - if (! 
pgd_none(*pgd)) { - pte_t *pte = (pte_t *)pgd_page(*pgd); + if (! pud_none(*pud)) { + pte_t *pte = (pte_t *)pud_page(*pud); struct page *ptepage = virt_to_page(pte); ptepage->mapping = NULL; @@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); kmem_cache_free(zero_cache, pte); } - pgd_clear(pgd); + pud_clear(pud); } BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index a7149b9..cf33d7e 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c @@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr) #else +static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte); + WARN_ON(!pte_none(ptent) && !pte_present(ptent)); + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr, + unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + continue; + unmap_im_area_pte(pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr, + unsigned long end) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + unmap_im_area_pmd(pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +static void unmap_im_area(unsigned long addr, unsigned long end) +{ + struct mm_struct *mm = &ioremap_mm; + unsigned long next; + pgd_t *pgd; + + spin_lock(&mm->page_table_lock); + + pgd = pgd_offset_i(addr); + flush_cache_vunmap(addr, end); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + unmap_im_area_pud(pgd, addr, next); + } while (pgd++, addr = next, addr != end); + flush_tlb_kernel_range(start, end); + + spin_unlock(&mm->page_table_lock); +} + /* * map_io_page currently only called by __ioremap * map_io_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ -static void map_io_page(unsigned long ea, unsigned long pa, int flags) +static int map_io_page(unsigned long ea, unsigned long pa, int flags) { pgd_t *pgdp; + pud_t *pudp; pmd_t *pmdp; pte_t *ptep; unsigned long vsid; @@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) if (mem_init_done) { spin_lock(&ioremap_mm.page_table_lock); pgdp = pgd_offset_i(ea); - pmdp = pmd_alloc(&ioremap_mm, pgdp, ea); + pudp = pud_alloc(&ioremap_mm, pgdp, ea); + if (!pudp) + return -ENOMEM; + pmdp = pmd_alloc(&ioremap_mm, pudp, ea); + if (!pmdp) + return -ENOMEM; ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); - + if (!ptep) + return -ENOMEM; pa = abs_to_phys(pa); set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); @@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) panic("map_io_page: could not insert mapping"); } } + return 0; } @@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, flags |= pgprot_val(PAGE_KERNEL); for (i = 0; i < size; i += PAGE_SIZE) - map_io_page(ea+i, pa+i, flags); + if (map_io_page(ea+i, pa+i, flags)) + goto failure; return (void __iomem *) (ea + (addr & ~PAGE_MASK)); + failure: + if 
(mem_init_done) + unmap_im_area(ea, ea + size); + return NULL; } @@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size) return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); } -void __iomem * -__ioremap(unsigned long addr, unsigned long size, unsigned long flags) +void __iomem * __ioremap(unsigned long addr, unsigned long size, + unsigned long flags) { unsigned long pa, ea; + void __iomem *ret; /* * Choose an address to map it to. @@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags) if (area == NULL) return NULL; ea = (unsigned long)(area->addr); + ret = __ioremap_com(addr, pa, ea, size, flags); + if (!ret) + im_free(area->addr); } else { ea = ioremap_bot; - ioremap_bot += size; + ret = __ioremap_com(addr, pa, ea, size, flags); + if (ret) + ioremap_bot += size; } - - return __ioremap_com(addr, pa, ea, size, flags); + return ret; } #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) @@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, unsigned long size, unsigned long flags) { struct vm_struct *area; + void __iomem *ret; /* For now, require page-aligned values for pa, ea, and size */ if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || @@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, } } - if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) { + ret = __ioremap_com(pa, pa, ea, size, flags); + if (ret == NULL) { + printk(KERN_ERR "ioremap_explicit() allocation failure !\n"); + return 1; + } + if (ret != (void *) ea) { printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); return 1; } @@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, return 0; } -static void unmap_im_area_pte(pmd_t *pmd, unsigned long address, - unsigned long size) -{ - unsigned long base, end; - pte_t *pte; - - if (pmd_none(*pmd)) - return; - if (pmd_bad(*pmd)) { - pmd_ERROR(*pmd); - pmd_clear(pmd); - return; - } - - pte = pte_offset_kernel(pmd, address); - base = address & PMD_MASK; - address &= ~PMD_MASK; - end = address + size; - if (end > PMD_SIZE) - end = PMD_SIZE; - - do { - pte_t page; - page = ptep_get_and_clear(&ioremap_mm, base + address, pte); - address += PAGE_SIZE; - pte++; - if (pte_none(page)) - continue; - if (pte_present(page)) - continue; - printk(KERN_CRIT "Whee.. Swapped out page in kernel page" - " table\n"); - } while (address < end); -} - -static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, - unsigned long size) -{ - unsigned long base, end; - pmd_t *pmd; - - if (pgd_none(*dir)) - return; - if (pgd_bad(*dir)) { - pgd_ERROR(*dir); - pgd_clear(dir); - return; - } - - pmd = pmd_offset(dir, address); - base = address & PGDIR_MASK; - address &= ~PGDIR_MASK; - end = address + size; - if (end > PGDIR_SIZE) - end = PGDIR_SIZE; - - do { - unmap_im_area_pte(pmd, base + address, end - address); - address = (address + PMD_SIZE) & PMD_MASK; - pmd++; - } while (address < end); -} - /* * Unmap an IO region and remove it from imalloc'd list. * Access to IO memory should be serialized by driver. 
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, */ void iounmap(volatile void __iomem *token) { - unsigned long address, start, end, size; - struct mm_struct *mm; - pgd_t *dir; + unsigned long address, size; void *addr; - if (!mem_init_done) { + if (!mem_init_done) return; - } addr = (void *) ((unsigned long __force) token & PAGE_MASK); - if ((size = im_free(addr)) == 0) { + if ((size = im_free(addr)) == 0) return; - } address = (unsigned long)addr; - start = address; - end = address + size; - - mm = &ioremap_mm; - spin_lock(&mm->page_table_lock); - - dir = pgd_offset_i(address); - flush_cache_vunmap(address, end); - do { - unmap_im_area_pmd(dir, address, end - address); - address = (address + PGDIR_SIZE) & PGDIR_MASK; - dir++; - } while (address && (address < end)); - flush_tlb_kernel_range(start, end); - - spin_unlock(&mm->page_table_lock); - return; + unmap_im_area(address, address + size); } static int iounmap_subset_regions(unsigned long addr, unsigned long size) -- cgit v1.1 From 66faf9845a05905d75da380767e93455f3e6d620 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sun, 1 May 2005 08:58:45 -0700 Subject: [PATCH] ppc64: tell firmware about kernel capabilities On pSeries systems, according to the platform architecture specs, we are supposed to be supplying a structure to firmware that tells firmware about our capabilities, such as which version of the data structures that describe available memory we are expecting to see. The way we end up having to supply this data structure is a bit gross, since it was designed for AIX and doesn't suit us very well. This patch adds the code to supply this data structure to the firmware. Signed-off-by: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/boot/addnote.c | 60 +++++++++++++++++++---- arch/ppc64/kernel/prom_init.c | 107 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 157 insertions(+), 10 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/boot/addnote.c b/arch/ppc64/boot/addnote.c index 66ff810..719663a 100644 --- a/arch/ppc64/boot/addnote.c +++ b/arch/ppc64/boot/addnote.c @@ -19,6 +19,7 @@ #include #include +/* CHRP note section */ char arch[] = "PowerPC"; #define N_DESCR 6 @@ -31,6 +32,29 @@ unsigned int descr[N_DESCR] = { 0x4000, /* load-base */ }; +/* RPA note section */ +char rpaname[] = "IBM,RPA-Client-Config"; + +/* + * Note: setting ignore_my_client_config *should* mean that OF ignores + * all the other fields, but there is a firmware bug which means that + * it looks at the splpar field at least. So these values need to be + * reasonable. 
+ */ +#define N_RPA_DESCR 8 +unsigned int rpanote[N_RPA_DESCR] = { + 0, /* lparaffinity */ + 64, /* min_rmo_size */ + 0, /* min_rmo_percent */ + 40, /* max_pft_size */ + 1, /* splpar */ + -1, /* min_load */ + 0, /* new_mem_def */ + 1, /* ignore_my_client_config */ +}; + +#define ROUNDUP(len) (((len) + 3) & ~3) + unsigned char buf[512]; #define GET_16BE(off) ((buf[off] << 8) + (buf[(off)+1])) @@ -69,7 +93,7 @@ main(int ac, char **av) { int fd, n, i; int ph, ps, np; - int nnote, ns; + int nnote, nnote2, ns; if (ac != 2) { fprintf(stderr, "Usage: %s elf-file\n", av[0]); @@ -81,7 +105,8 @@ main(int ac, char **av) exit(1); } - nnote = strlen(arch) + 1 + (N_DESCR + 3) * 4; + nnote = 12 + ROUNDUP(strlen(arch) + 1) + sizeof(descr); + nnote2 = 12 + ROUNDUP(strlen(rpaname) + 1) + sizeof(rpanote); n = read(fd, buf, sizeof(buf)); if (n < 0) { @@ -104,7 +129,7 @@ main(int ac, char **av) np = GET_16BE(E_PHNUM); if (ph < E_HSIZE || ps < PH_HSIZE || np < 1) goto notelf; - if (ph + (np + 1) * ps + nnote > n) + if (ph + (np + 2) * ps + nnote + nnote2 > n) goto nospace; for (i = 0; i < np; ++i) { @@ -117,12 +142,12 @@ main(int ac, char **av) } /* XXX check that the area we want to use is all zeroes */ - for (i = 0; i < ps + nnote; ++i) + for (i = 0; i < 2 * ps + nnote + nnote2; ++i) if (buf[ph + i] != 0) goto nospace; /* fill in the program header entry */ - ns = ph + ps; + ns = ph + 2 * ps; PUT_32BE(ph + PH_TYPE, PT_NOTE); PUT_32BE(ph + PH_OFFSET, ns); PUT_32BE(ph + PH_FILESZ, nnote); @@ -134,11 +159,26 @@ main(int ac, char **av) PUT_32BE(ns + 8, 0x1275); strcpy(&buf[ns + 12], arch); ns += 12 + strlen(arch) + 1; - for (i = 0; i < N_DESCR; ++i) - PUT_32BE(ns + i * 4, descr[i]); + for (i = 0; i < N_DESCR; ++i, ns += 4) + PUT_32BE(ns, descr[i]); + + /* fill in the second program header entry and the RPA note area */ + ph += ps; + PUT_32BE(ph + PH_TYPE, PT_NOTE); + PUT_32BE(ph + PH_OFFSET, ns); + PUT_32BE(ph + PH_FILESZ, nnote2); + + /* fill in the note area we point to */ + PUT_32BE(ns, strlen(rpaname) + 1); + PUT_32BE(ns + 4, sizeof(rpanote)); + PUT_32BE(ns + 8, 0x12759999); + strcpy(&buf[ns + 12], rpaname); + ns += 12 + ROUNDUP(strlen(rpaname) + 1); + for (i = 0; i < N_RPA_DESCR; ++i, ns += 4) + PUT_32BE(ns, rpanote[i]); /* Update the number of program headers */ - PUT_16BE(E_PHNUM, np + 1); + PUT_16BE(E_PHNUM, np + 2); /* write back */ lseek(fd, (long) 0, SEEK_SET); @@ -155,11 +195,11 @@ main(int ac, char **av) exit(0); notelf: - fprintf(stderr, "%s does not appear to be an ELF file\n", av[0]); + fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]); exit(1); nospace: fprintf(stderr, "sorry, I can't find space in %s to put the note\n", - av[0]); + av[1]); exit(1); } diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c index 8dffa9a..b0b784f 100644 --- a/arch/ppc64/kernel/prom_init.c +++ b/arch/ppc64/kernel/prom_init.c @@ -493,6 +493,113 @@ static void __init early_cmdline_parse(void) } /* + * To tell the firmware what our capabilities are, we have to pass + * it a fake 32-bit ELF header containing a couple of PT_NOTE sections + * that contain structures that contain the actual values. 
+ */ +static struct fake_elf { + Elf32_Ehdr elfhdr; + Elf32_Phdr phdr[2]; + struct chrpnote { + u32 namesz; + u32 descsz; + u32 type; + char name[8]; /* "PowerPC" */ + struct chrpdesc { + u32 real_mode; + u32 real_base; + u32 real_size; + u32 virt_base; + u32 virt_size; + u32 load_base; + } chrpdesc; + } chrpnote; + struct rpanote { + u32 namesz; + u32 descsz; + u32 type; + char name[24]; /* "IBM,RPA-Client-Config" */ + struct rpadesc { + u32 lpar_affinity; + u32 min_rmo_size; + u32 min_rmo_percent; + u32 max_pft_size; + u32 splpar; + u32 min_load; + u32 new_mem_def; + u32 ignore_me; + } rpadesc; + } rpanote; +} fake_elf = { + .elfhdr = { + .e_ident = { 0x7f, 'E', 'L', 'F', + ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, + .e_type = ET_EXEC, /* yeah right */ + .e_machine = EM_PPC, + .e_version = EV_CURRENT, + .e_phoff = offsetof(struct fake_elf, phdr), + .e_phentsize = sizeof(Elf32_Phdr), + .e_phnum = 2 + }, + .phdr = { + [0] = { + .p_type = PT_NOTE, + .p_offset = offsetof(struct fake_elf, chrpnote), + .p_filesz = sizeof(struct chrpnote) + }, [1] = { + .p_type = PT_NOTE, + .p_offset = offsetof(struct fake_elf, rpanote), + .p_filesz = sizeof(struct rpanote) + } + }, + .chrpnote = { + .namesz = sizeof("PowerPC"), + .descsz = sizeof(struct chrpdesc), + .type = 0x1275, + .name = "PowerPC", + .chrpdesc = { + .real_mode = ~0U, /* ~0 means "don't care" */ + .real_base = ~0U, + .real_size = ~0U, + .virt_base = ~0U, + .virt_size = ~0U, + .load_base = ~0U + }, + }, + .rpanote = { + .namesz = sizeof("IBM,RPA-Client-Config"), + .descsz = sizeof(struct rpadesc), + .type = 0x12759999, + .name = "IBM,RPA-Client-Config", + .rpadesc = { + .lpar_affinity = 0, + .min_rmo_size = 64, /* in megabytes */ + .min_rmo_percent = 0, + .max_pft_size = 48, /* 2^48 bytes max PFT size */ + .splpar = 1, + .min_load = ~0U, + .new_mem_def = 0 + } + } +}; + +static void __init prom_send_capabilities(void) +{ + unsigned long offset = reloc_offset(); + ihandle elfloader; + int ret; + + elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader")); + if (elfloader == 0) { + prom_printf("couldn't open /packages/elf-loader\n"); + return; + } + ret = call_prom("call-method", 3, 1, ADDR("process-elf-header"), + elfloader, ADDR(&fake_elf)); + call_prom("close", 1, 0, elfloader); +} + +/* * Memory allocation strategy... our layout is normally: * * at 14Mb or more we vmlinux, then a gap and initrd. In some rare cases, initrd -- cgit v1.1 From d03853d566fb32c6bb8cab4bf2ecf53e692f001c Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Sun, 1 May 2005 08:58:45 -0700 Subject: [PATCH] PPC64: Remove hot busy-wait loop in __hash_page It turns out that our current __hash_page code will do a very hot busy-wait loop waiting on _PAGE_BUSY to be cleared. It even does ldarx/stdcx in the loop, which will bounce reservations around like crazy if there's more than one CPU spinning on the same PTE (or even another PTE in the same reservation granule). The end result is that each fault takes longer when there's contention, which in turn increases the chance of another thread hitting the same fault and also piling up. Not pretty. There's two options here: 1. Do an out-of-line busy loop a'la spinlocks with just loads (no reserves) 2. Just bail and refault if needed. (2) makes sense here: If the PTE is busy, chances are it's in flux anyway and the other code path making a change might just be ready to hash it. This fixes a stampede seen on a large-ish system where a multithreaded HPC app faults in the same text pages on several cpus at the same time. 
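The two options generate very different memory traffic, which is easier to see in C than in the hash_low.S assembly the patch actually touches. The helpers below are a sketch written for this changelog, not kernel code: the names and the use of a bare unsigned long PTE word are simplifications.

    #include <asm/processor.h>      /* cpu_relax() */
    #include <asm/pgtable.h>        /* _PAGE_BUSY */

    /* Option 1: an out-of-line wait using plain loads, so the reservation
     * granule is not bounced between CPUs while the busy bit is set.
     */
    static unsigned long wait_pte_not_busy(const volatile unsigned long *ptep)
    {
            unsigned long v;

            do {
                    v = *ptep;              /* plain load, no ldarx reservation */
                    cpu_relax();
            } while (v & _PAGE_BUSY);
            return v;
    }

    /* Option 2, which the patch implements as bail_ok: if the PTE is busy,
     * report "nothing to do" and return.  If the translation is still
     * missing once the concurrent update finishes, the access simply
     * faults again and retries on an uncontended PTE.
     */
    static int try_hash_pte(const volatile unsigned long *ptep)
    {
            if (*ptep & _PAGE_BUSY)
                    return 0;               /* bail and let the refault retry */
            /* ... atomically set _PAGE_BUSY and go on to insert the HPTE ... */
            return 1;
    }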
Signed-off-by: Olof Johansson Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/mm/hash_low.S | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S index 8c0156a..c23d469 100644 --- a/arch/ppc64/mm/hash_low.S +++ b/arch/ppc64/mm/hash_low.S @@ -85,7 +85,10 @@ _GLOBAL(__hash_page) bne- htab_wrong_access /* Check if PTE is busy */ andi. r0,r31,_PAGE_BUSY - bne- 1b + /* If so, just bail out and refault if needed. Someone else + * is changing this PTE anyway and might hash it. + */ + bne- bail_ok /* Prepare new PTE value (turn access RW into DIRTY, then * add BUSY,HASHPTE and ACCESSED) */ @@ -215,6 +218,10 @@ _GLOBAL(htab_call_hpte_remove) /* Try all again */ b htab_insert_pte +bail_ok: + li r3,0 + b bail + htab_pte_insert_ok: /* Insert slot number & secondary bit in PTE */ rldimi r30,r3,12,63-15 -- cgit v1.1 From c4005e4f661a9ec8d91720a6b570865d060afcaf Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 1 May 2005 08:58:46 -0700 Subject: [PATCH] ppc64: firmware workaround Recent gcc 4.0 testing uncovered a firmware issue. Some properties are larger than 31 bytes and due to gcc 4.0s better stack allocation this overflow ran over non volatile register storage. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/prom_init.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c index b0b784f..35ec42d 100644 --- a/arch/ppc64/kernel/prom_init.c +++ b/arch/ppc64/kernel/prom_init.c @@ -1555,6 +1555,12 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, } } +/* + * The Open Firmware 1275 specification states properties must be 31 bytes or + * less, however not all firmwares obey this. Make it 64 bytes to be safe. + */ +#define MAX_PROPERTY_NAME 64 + static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, unsigned long *mem_end) { @@ -1564,7 +1570,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, unsigned long soff; unsigned char *valp; unsigned long offset = reloc_offset(); - char pname[32]; + char pname[MAX_PROPERTY_NAME]; char *path; path = RELOC(prom_scratch); -- cgit v1.1 From eeb24de431ac8c80fd13a2c479cd0eb51b70484e Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 1 May 2005 08:58:46 -0700 Subject: [PATCH] ppc64: enforce medium thread priority in hypervisor calls Calls into the hypervisor do not raise the thread priority. Ensure we are running at medium priority upon entry to the hypervisor. 
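HMT_MEDIUM expands to one of the SMT priority no-ops: on SMT-capable POWER processors, special forms of "or rN,rN,rN" adjust the hardware thread priority without otherwise doing anything. The macro names and the wrapper below are written for this note (the kernel keeps its own HMT_* macros in the ppc64 processor header); the encodings shown are the conventional low/medium/high forms.

    /* Priority-setting no-ops; issuing "or 2,2,2" is what HMT_MEDIUM does. */
    #define SMT_PRIORITY_LOW()     asm volatile("or 1,1,1")
    #define SMT_PRIORITY_MEDIUM()  asm volatile("or 2,2,2")
    #define SMT_PRIORITY_HIGH()    asm volatile("or 3,3,3")

    /* Sketch of the pattern being enforced: a caller that lowered its
     * priority (for example while spinning on a lock) must not carry that
     * low priority into a potentially long-running hypervisor call.
     */
    static inline long hcall_at_medium_priority(long (*hcall)(unsigned long),
                                                unsigned long arg)
    {
            SMT_PRIORITY_MEDIUM();
            return hcall(arg);
    }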
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/pSeries_hvCall.S | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/ppc64/kernel/pSeries_hvCall.S index 0715d30..176e8da 100644 --- a/arch/ppc64/kernel/pSeries_hvCall.S +++ b/arch/ppc64/kernel/pSeries_hvCall.S @@ -28,6 +28,8 @@ unsigned long *out3); R10 */ _GLOBAL(plpar_hcall) + HMT_MEDIUM + mfcr r0 std r8,STK_PARM(r8)(r1) /* Save out ptrs */ @@ -53,6 +55,8 @@ _GLOBAL(plpar_hcall) /* Simple interface with no output values (other than status) */ _GLOBAL(plpar_hcall_norets) + HMT_MEDIUM + mfcr r0 stw r0,8(r1) @@ -75,6 +79,8 @@ _GLOBAL(plpar_hcall_norets) unsigned long *out1); 120(R1) */ _GLOBAL(plpar_hcall_8arg_2ret) + HMT_MEDIUM + mfcr r0 ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */ stw r0,8(r1) @@ -99,6 +105,8 @@ _GLOBAL(plpar_hcall_8arg_2ret) unsigned long *out4); 112(R1) */ _GLOBAL(plpar_hcall_4out) + HMT_MEDIUM + mfcr r0 stw r0,8(r1) -- cgit v1.1 From 0d8d4d42f2d00eb65262b49f4edd4cf7ef4eb6fc Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 1 May 2005 08:58:47 -0700 Subject: [PATCH] ppc64: use smp_mb and smp_wmb Use smp_mb and smp_wmb. In particular smp_wmb is lighter weight than wmb. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/smp.c | 12 ++++++------ arch/ppc64/kernel/time.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c index 1c92da3..3b906cd 100644 --- a/arch/ppc64/kernel/smp.c +++ b/arch/ppc64/kernel/smp.c @@ -125,7 +125,7 @@ void __devinit smp_generic_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; - mb(); + smp_mb(); } #endif /* CONFIG_PPC_MULTIPLATFORM */ @@ -256,7 +256,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, } call_data = &data; - wmb(); + smp_wmb(); /* Send a message to all other CPUs and wait for them to respond */ smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION); @@ -431,7 +431,7 @@ int generic_cpu_enable(unsigned int cpu) /* get the target out of it's holding state */ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - wmb(); + smp_wmb(); while (!cpu_online(cpu)) cpu_relax(); @@ -447,7 +447,7 @@ void generic_cpu_die(unsigned int cpu) int i; for (i = 0; i < 100; i++) { - rmb(); + smp_rmb(); if (per_cpu(cpu_state, cpu) == CPU_DEAD) return; msleep(100); @@ -463,7 +463,7 @@ void generic_mach_cpu_die(void) cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); __get_cpu_var(cpu_state) = CPU_DEAD; - wmb(); + smp_wmb(); while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); @@ -515,7 +515,7 @@ int __devinit __cpu_up(unsigned int cpu) * be written out to main store before we release * the processor. 
*/ - mb(); + smp_mb(); /* wake up cpus */ DBG("smp: kicking cpu %d\n", cpu); diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 77ded5a..772a465 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c @@ -221,15 +221,15 @@ static __inline__ void timer_recalc_offset(unsigned long cur_tb) temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs; temp_varp->tb_orig_stamp = new_tb_orig_stamp; temp_varp->stamp_xsec = new_stamp_xsec; - mb(); + smp_mb(); do_gtod.varp = temp_varp; do_gtod.var_idx = temp_idx; ++(systemcfg->tb_update_count); - wmb(); + smp_wmb(); systemcfg->tb_orig_stamp = new_tb_orig_stamp; systemcfg->stamp_xsec = new_stamp_xsec; - wmb(); + smp_wmb(); ++(systemcfg->tb_update_count); } @@ -648,7 +648,7 @@ void ppc_adjtimex(void) temp_varp->tb_to_xs = new_tb_to_xs; temp_varp->stamp_xsec = new_stamp_xsec; temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp; - mb(); + smp_mb(); do_gtod.varp = temp_varp; do_gtod.var_idx = temp_idx; @@ -662,10 +662,10 @@ void ppc_adjtimex(void) * loops back and reads them again until this criteria is met. */ ++(systemcfg->tb_update_count); - wmb(); + smp_wmb(); systemcfg->tb_to_xs = new_tb_to_xs; systemcfg->stamp_xsec = new_stamp_xsec; - wmb(); + smp_wmb(); ++(systemcfg->tb_update_count); write_sequnlock_irqrestore( &xtime_lock, flags ); -- cgit v1.1 From fbd568a3e61a7decb8a754ad952aaa5b5c82e9e5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 1 May 2005 08:59:04 -0700 Subject: [PATCH] Change synchronize_kernel to _rcu and _sched This patch changes calls to synchronize_kernel(), deprecated in the earlier "Deprecate synchronize_kernel, GPL replacement" patch to instead call the new synchronize_rcu() and synchronize_sched() APIs. Signed-off-by: Paul E. McKenney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/HvLpEvent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c index 9802bee..f8f1963 100644 --- a/arch/ppc64/kernel/HvLpEvent.c +++ b/arch/ppc64/kernel/HvLpEvent.c @@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType ) /* We now sleep until all other CPUs have scheduled. This ensures that * the deletion is seen by all other CPUs, and that the deleted handler * isn't still running on another CPU when we return. */ - synchronize_kernel(); + synchronize_rcu(); } } return rc; -- cgit v1.1 From 7ed20e1ad521b5f5df61bf6559ae60738e393741 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 1 May 2005 08:59:14 -0700 Subject: [PATCH] convert that currently tests _NSIG directly to use valid_signal() Convert most of the current code that uses _NSIG directly to instead use valid_signal(). This avoids gcc -W warnings and off-by-one errors. Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/ptrace.c | 5 +++-- arch/ppc64/kernel/ptrace32.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c index 354a287..5a84632 100644 --- a/arch/ppc64/kernel/ptrace.c +++ b/arch/ppc64/kernel/ptrace.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -162,7 +163,7 @@ int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. 
*/ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); @@ -194,7 +195,7 @@ int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/ppc64/kernel/ptrace32.c index ee81b1b..1643642 100644 --- a/arch/ppc64/kernel/ptrace32.c +++ b/arch/ppc64/kernel/ptrace32.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -293,7 +294,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); @@ -325,7 +326,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); -- cgit v1.1
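For reference, valid_signal(), introduced in include/linux/signal.h alongside these conversions, is essentially the same bound check kept in one place, which is what removes the scattered casts and the gcc -W sign-comparison warnings. The body below is a from-memory sketch rather than a quotation of the header:

    static inline int valid_signal(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }

    /* Call sites such as the ptrace handlers above then read naturally:
     *
     *         if (!valid_signal(data)) {
     *                 ret = -EIO;
     *                 break;
     *         }
     */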