Diffstat (limited to 'arch')
54 files changed, 103 insertions, 105 deletions
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S
index bd0bdf9..cbb6958 100644
--- a/arch/frv/kernel/break.S
+++ b/arch/frv/kernel/break.S
@@ -21,7 +21,7 @@
 #
 # the break handler has its own stack
 #
-	.section	.bss.stack
+	.section	.bss..stack
 	.globl		__break_user_context
 	.balign		THREAD_SIZE
 __break_stack:
@@ -63,7 +63,7 @@ __break_trace_through_exceptions:
 # entry point for Break Exceptions/Interrupts
 #
 ###############################################################################
-	.section	.text.break
+	.section	.text..break
 	.balign		4
 	.globl		__entry_break
 __entry_break:
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 189397e..63d579b 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -38,7 +38,7 @@

 #define nr_syscalls ((syscall_table_size)/4)

-	.section	.text.entry
+	.section	.text..entry
 	.balign		4

 .macro LEDS val
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
index b825ef3..e9a8cc6 100644
--- a/arch/frv/kernel/head.S
+++ b/arch/frv/kernel/head.S
@@ -542,7 +542,7 @@ __head_end:
 	.size	_boot, .-_boot

 	# provide a point for GDB to place a break
-	.section	.text.start,"ax"
+	.section	.text..start,"ax"
 	.globl		_start
 	.balign		4
 _start:
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index cbe811f..8b973f3 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -57,10 +57,10 @@ SECTIONS
   _text = .;
   _stext = .;
   .text : {
-	*(.text.start)
-	*(.text.entry)
-	*(.text.break)
-	*(.text.tlbmiss)
+	*(.text..start)
+	*(.text..entry)
+	*(.text..break)
+	*(.text..tlbmiss)
 	TEXT_TEXT
 	SCHED_TEXT
 	LOCK_TEXT
@@ -114,7 +114,7 @@ SECTIONS

   .sbss		: { *(.sbss .sbss.*) }
   .bss		: { *(.bss .bss.*) }
-  .bss.stack	: { *(.bss) }
+  .bss..stack	: { *(.bss) }

   __bss_stop = .;
   _end = . ;
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S
index 7f392bc..f3ac019 100644
--- a/arch/frv/mm/tlb-miss.S
+++ b/arch/frv/mm/tlb-miss.S
@@ -15,7 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/spr-regs.h>

-	.section	.text.tlbmiss
+	.section	.text..tlbmiss
 	.balign		4

 	.globl		__entry_insn_mmu_miss
diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S
index 985a81a..10e9a2d 100644
--- a/arch/h8300/boot/compressed/head.S
+++ b/arch/h8300/boot/compressed/head.S
@@ -9,7 +9,7 @@

 #define SRAM_START 0xff4000

-	.section	.text.startup
+	.section	.text..startup
 	.global	startup
 startup:
 	mov.l	#SRAM_START+0x8000, sp
diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds
index 65e2a0d..a0a3a0e 100644
--- a/arch/h8300/boot/compressed/vmlinux.lds
+++ b/arch/h8300/boot/compressed/vmlinux.lds
@@ -4,7 +4,7 @@ SECTIONS
         {
         __stext = . ;
 	__text = .;
-	       *(.text.startup)
+	       *(.text..startup)
 	       *(.text)
         __etext = . ;
         }
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
index c1642fd..3ab6d75 100644
--- a/arch/ia64/include/asm/asmmacro.h
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -70,12 +70,12 @@ name:
  * path (ivt.S - TLB miss processing) or in places where it might not be
  * safe to use a "tpa" instruction (mca_asm.S - error recovery).
  */
-	.section ".data.patch.vtop", "a"	// declare section & section attributes
+	.section ".data..patch.vtop", "a"	// declare section & section attributes
 	.previous

 #define	LOAD_PHYSICAL(pr, reg, obj)		\
 [1:](pr)movl reg = obj;				\
-	.xdata4 ".data.patch.vtop", 1b-.
+	.xdata4 ".data..patch.vtop", 1b-.

 /*
  * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
@@ -84,11 +84,11 @@ name:
 #define DO_MCKINLEY_E9_WORKAROUND

 #ifdef DO_MCKINLEY_E9_WORKAROUND
-	.section ".data.patch.mckinley_e9", "a"
+	.section ".data..patch.mckinley_e9", "a"
 	.previous
 /* workaround for Itanium 2 Errata 9: */
 # define FSYS_RETURN					\
-	.xdata4 ".data.patch.mckinley_e9", 1f-.;	\
+	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
 1:{ .mib;						\
 	nop.m 0;					\
 	mov r16=ar.pfs;					\
@@ -107,11 +107,11 @@ name:
  * If physical stack register size is different from DEF_NUM_STACK_REG,
  * dynamically patch the kernel for correct size.
  */
-	.section ".data.patch.phys_stack_reg", "a"
+	.section ".data..patch.phys_stack_reg", "a"
 	.previous
 #define LOAD_PHYS_STACK_REG_SIZE(reg)			\
 [1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
-	.xdata4 ".data.patch.phys_stack_reg", 1b-.
+	.xdata4 ".data..patch.phys_stack_reg", 1b-.

 /*
  * Up until early 2004, use of .align within a function caused bad unwind info.
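[Editor's note: the .xdata4 directives above emit 4-byte self-relative entries into the renamed patch-list sections; at boot the kernel walks each list and rewrites the tagged instruction bundles. A minimal sketch of that consumption, loosely modeled on arch/ia64/kernel/patch.c (names and the bundle-rewrite step are illustrative, not the exact kernel code):

    typedef int s32;
    typedef unsigned long u64;

    /* Walk a patch list delimited by linker-script start/end symbols.
     * Each entry is a 32-bit offset relative to its own address,
     * pointing at the instruction bundle to rewrite. */
    static void walk_patchlist(s32 *offp, s32 *end)
    {
            while (offp < end) {
                    u64 site = (u64) offp + *offp;  /* self-relative -> absolute */
                    /* ... rewrite the instruction bundle at 'site' ... */
                    offp++;
            }
    }
]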
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index e7482bd..988254a 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,6 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 #endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 1bd4082..14aa1c5 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -31,7 +31,7 @@ extern void *per_cpu_init(void);

 #endif	/* SMP */

-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"

 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ab9b03a..ceeffc5 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r
 $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)

-# gate-data.o contains the gate DSO image as data in section .data.gate.
+# gate-data.o contains the gate DSO image as data in section .data..gate.
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
index 258c0a3..b3ef1c7 100644
--- a/arch/ia64/kernel/gate-data.S
+++ b/arch/ia64/kernel/gate-data.S
@@ -1,3 +1,3 @@
-	.section .data.gate, "aw"
+	.section .data..gate, "aw"

 	.incbin "arch/ia64/kernel/gate.so"
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index cf5e0a1..245d3e1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -21,18 +21,18 @@
  * to targets outside the shared object) and to avoid multi-phase kernel builds, we
  * simply create minimalistic "patch lists" in special ELF sections.
  */
-	.section ".data.patch.fsyscall_table", "a"
+	.section ".data..patch.fsyscall_table", "a"
 	.previous
 #define LOAD_FSYSCALL_TABLE(reg)			\
 [1:]	movl reg=0;					\
-	.xdata4 ".data.patch.fsyscall_table", 1b-.
+	.xdata4 ".data..patch.fsyscall_table", 1b-.

-	.section ".data.patch.brl_fsys_bubble_down", "a"
+	.section ".data..patch.brl_fsys_bubble_down", "a"
 	.previous
 #define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
 [1:](pr)brl.cond.sptk 0;				\
 	;;						\
-	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+	.xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.

 GLOBAL_ENTRY(__kernel_syscall_via_break)
 	.prologue
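[Editor's note: the cache.h and percpu.h renames above are invisible to C code, which only ever uses the wrapper macros. A hedged usage sketch (the variable names are hypothetical):

    #include <linux/cache.h>
    #include <linux/percpu.h>

    /* Lands in .data..read_mostly: written rarely, read on hot paths,
     * grouped away from write-hot cachelines. */
    static int sysctl_example __read_mostly = 1;

    /* Lands in .data..percpu, then replicated per CPU during boot. */
    static DEFINE_PER_CPU(unsigned long, example_counter);
]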
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 88c64ed..d32b085 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -33,21 +33,21 @@ SECTIONS
 	 */
 	. = GATE_ADDR + 0x600;

-	.data.patch		: {
+	.data..patch		: {
 		__paravirt_start_gate_mckinley_e9_patchlist = .;
-		*(.data.patch.mckinley_e9)
+		*(.data..patch.mckinley_e9)
 		__paravirt_end_gate_mckinley_e9_patchlist = .;

 		__paravirt_start_gate_vtop_patchlist = .;
-		*(.data.patch.vtop)
+		*(.data..patch.vtop)
 		__paravirt_end_gate_vtop_patchlist = .;

 		__paravirt_start_gate_fsyscall_patchlist = .;
-		*(.data.patch.fsyscall_table)
+		*(.data..patch.fsyscall_table)
 		__paravirt_end_gate_fsyscall_patchlist = .;

 		__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
-		*(.data.patch.brl_fsys_bubble_down)
+		*(.data..patch.brl_fsys_bubble_down)
 		__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
 	} :readable
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index e253ab8..f9efe97 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * Initial task structure.
  *
  * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data.init_task" section...
+ * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info	init_task_mem.s.thread_info
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 179fd12..d93e396 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -82,7 +82,7 @@
 	mov r19=n;;			/* prepare to save predicates */		\
 	br.sptk.many dispatch_to_fault_handler

-	.section .text.ivt,"ax"
+	.section .text..ivt,"ax"

 	.align 32768	// align on 32KB boundary
 	.global ia64_ivt
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 292e214..d56753a1 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -16,7 +16,7 @@
 #define ACCOUNT_SYS_ENTER
 #endif

-.section ".data.patch.rse", "a"
+.section ".data..patch.rse", "a"
 .previous

 /*
@@ -215,7 +215,7 @@
 (pUStk)	extr.u r17=r18,3,6;			\
 (pUStk)	sub r16=r18,r22;			\
 [1:](pKStk)	br.cond.sptk.many 1f;		\
-	.xdata4 ".data.patch.rse",1b-.		\
+	.xdata4 ".data..patch.rse",1b-.		\
 	;;					\
 	cmp.ge p6,p7 = 33,r17;			\
 	;;					\
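[Editor's note: the init_task.c comment above refers to the section that lets the linker co-locate and align the initial task's stack. A sketch of the underlying declaration pattern (cf. the generic __init_task_data helper; exact attributes vary by tree):

    /* The initial thread_info and kernel stack share one THREAD_SIZE-aligned
     * union, placed via a named section so the linker script can align it. */
    union thread_union init_thread_union
            __attribute__((__section__(".data..init_task"))) =
                    { INIT_THREAD_INFO(init_task) };
]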
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 6158560..92d880c 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
 #include "entry.h"

 #define DATA8(sym, init_value)			\
-	.pushsection .data.read_mostly ;	\
+	.pushsection .data..read_mostly ;	\
 	.align 8 ;				\
 	.global sym ;				\
 	sym: ;					\
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1295ba3..e07218a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@

 #define IVT_TEXT							\
 		VMLINUX_SYMBOL(__start_ivt_text) = .;			\
-		*(.text.ivt)						\
+		*(.text..ivt)						\
 		VMLINUX_SYMBOL(__end_ivt_text) = .;

 OUTPUT_FORMAT("elf64-ia64-little")
@@ -54,8 +54,8 @@ SECTIONS
 	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 		{ *(.text2) }
 #ifdef CONFIG_SMP
-	.text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
-		{ *(.text.lock) }
+	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET)
+		{ *(.text..lock) }
 #endif
 	_etext = .;

@@ -75,10 +75,10 @@ SECTIONS
 		__stop___mca_table = .;
 	}

-	.data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
 		{
 		__start___phys_stack_reg_patchlist = .;
-		*(.data.patch.phys_stack_reg)
+		*(.data..patch.phys_stack_reg)
 		__end___phys_stack_reg_patchlist = .;
 	}

@@ -110,24 +110,24 @@ SECTIONS
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)

-	.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
 		{
 		__start___vtop_patchlist = .;
-		*(.data.patch.vtop)
+		*(.data..patch.vtop)
 		__end___vtop_patchlist = .;
 	}

-	.data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
 		{
 		__start___rse_patchlist = .;
-		*(.data.patch.rse)
+		*(.data..patch.rse)
 		__end___rse_patchlist = .;
 	}

-	.data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
 		{
 		__start___mckinley_e9_bundles = .;
-		*(.data.patch.mckinley_e9)
+		*(.data..patch.mckinley_e9)
 		__end___mckinley_e9_bundles = .;
 	}

@@ -175,17 +175,17 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;

-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
 		{
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 		. = ALIGN(PAGE_SIZE);
 		__start_gate_section = .;
-		*(.data.gate)
+		*(.data..gate)
 		__stop_gate_section = .;
 #ifdef CONFIG_XEN
 		. = ALIGN(PAGE_SIZE);
 		__xen_start_gate_section = .;
-		*(.data.gate.xen)
+		*(.data..gate.xen)
 		__xen_stop_gate_section = .;
 #endif
 	}
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 40920c6..2401848 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -104,7 +104,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
 	br.call.sptk.many b6=vmm_panic_handler;
 END(kvm_vmm_panic)

-    .section .text.ivt,"ax"
+    .section .text..ivt,"ax"

     .align 32768    // align on 32KB boundary
     .global kvm_ia64_ivt
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index c278498..2bfd941 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
 #
 # Usage: unwcheck.py FILE
 #
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
index 7d4830a..6f95b6b 100644
--- a/arch/ia64/xen/gate-data.S
+++ b/arch/ia64/xen/gate-data.S
@@ -1,3 +1,3 @@
-	.section .data.gate.xen, "aw"
+	.section .data..gate.xen, "aw"

 	.incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
index aff8346..b820ed0 100644
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -14,7 +14,7 @@
 #include <linux/init.h>
 #include <xen/interface/elfnote.h>

-	.section .data.read_mostly
+	.section .data..read_mostly
 	.align 8
 	.global xen_domain_type
 xen_domain_type:
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 9f1784f..a91b271 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -57,7 +57,7 @@ SECTIONS {
 	.romvec : {
 		__rom_start = . ;
 		_romvec = .;
-		*(.data.initvect)
+		*(.data..initvect)
 	} > romvec
 #endif

@@ -68,7 +68,7 @@ SECTIONS {
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
-		*(.text.lock)
+		*(.text..lock)

 		. = ALIGN(16);		/* Exception table */
 		__start___ex_table = .;
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index 2ef0624..8eb94fb 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -280,7 +280,7 @@ _dprbase:
  *	and then overwritten as needed.
  */

-.section ".data.initvect","awx"
+.section ".data..initvect","awx"
 	.long	RAMEND	/* Reset: Initial Stack Pointer		- 0.  */
 	.long	_start	/* Reset: Initial Program Counter	- 1.  */
 	.long	buserr	/* Bus Error				- 2.  */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 62ecf41..97510e5 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -291,7 +291,7 @@ _dprbase:
  *	and then overwritten as needed.
  */

-.section ".data.initvect","awx"
+.section ".data..initvect","awx"
 	.long	RAMEND	/* Reset: Initial Stack Pointer		- 0.  */
 	.long	_start	/* Reset: Initial Program Counter	- 1.  */
 	.long	buserr	/* Bus Error				- 2.  */
diff --git a/arch/mips/lasat/image/head.S b/arch/mips/lasat/image/head.S
index efb95f2..e0ecda9 100644
--- a/arch/mips/lasat/image/head.S
+++ b/arch/mips/lasat/image/head.S
@@ -1,7 +1,7 @@
 #include <asm/lasat/head.h>

 	.text
-	.section .text.start, "ax"
+	.section .text..start, "ax"
 	.set noreorder
 	.set mips3
diff --git a/arch/mips/lasat/image/romscript.normal b/arch/mips/lasat/image/romscript.normal
index 988f8ad..0864c96 100644
--- a/arch/mips/lasat/image/romscript.normal
+++ b/arch/mips/lasat/image/romscript.normal
@@ -4,7 +4,7 @@ SECTIONS
 {
   .text :
   {
-    *(.text.start)
+    *(.text..start)
   }

   /* Data in ROM */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 32c2cca..45effe6 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -28,7 +28,7 @@

 #define SMP_CACHE_BYTES L1_CACHE_BYTES

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 void parisc_cache_init(void);	/* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
index 4653c77..2ab4af5 100644
--- a/arch/parisc/include/asm/system.h
+++ b/arch/parisc/include/asm/system.h
@@ -174,7 +174,7 @@ static inline void set_eiem(unsigned long val)
 })

 #ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
 #endif

 #define arch_align_stack(x) (x)
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0e3d9f9..4dbdf0e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
 ENDPROC(stext)

 #ifndef CONFIG_64BIT
-	.section .data.read_mostly
+	.section .data..read_mostly

 	.align	4
 	.export	$global$,data
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index d020eae..4a91e4334 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,11 +53,11 @@ union thread_union init_thread_union __init_task_data
  * guarantee that global objects will be laid out in memory in the same order
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 #endif

-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

 /*
  * Initial task structure.
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 9dab4a4..d64a6bb 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -94,8 +94,8 @@ SECTIONS

 	/* PA-RISC locks requires 16-byte alignment */
 	. = ALIGN(16);
-	.data.lock_aligned : {
-		*(.data.lock_aligned)
+	.data..lock_aligned : {
+		*(.data..lock_aligned)
 	}

 	/* End of data section */
@@ -105,10 +105,10 @@ SECTIONS
 	__bss_start = .;
 	/* page table entries need to be PAGE_SIZE aligned */
 	. = ALIGN(PAGE_SIZE);
-	.data.vmpages : {
-		*(.data.vm0.pmd)
-		*(.data.vm0.pgd)
-		*(.data.vm0.pte)
+	.data..vmpages : {
+		*(.data..vm0.pmd)
+		*(.data..vm0.pgd)
+		*(.data..vm0.pte)
 	}
 	.bss : {
 		*(.bss)
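[Editor's note: parisc's __lock_aligned, renamed above to target .data..lock_aligned, exists because LDCW, the load-and-clear primitive behind parisc spinlocks, requires 16-byte-aligned operands; the ALIGN(16) in the linker script supplies that. A hedged usage sketch (the lock name is hypothetical):

    /* Every lock marked this way is grouped into .data..lock_aligned,
     * which the parisc linker script aligns to 16 bytes for LDCW. */
    static arch_spinlock_t example_lock __lock_aligned = __ARCH_SPIN_LOCK_UNLOCKED;
]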
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1a54a3b..42dcd3f4 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -112,6 +112,11 @@ KBUILD_CFLAGS += $(call cc-option,-mspe=no)
 # kernel considerably.
 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)

+# FIXME: the module load should be taught about the additional relocs
+# generated by this.
+# revert to pre-gcc-4.4 behaviour of .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
 # Never use string load/store instructions as they are
 # often slow when they are implemented at all
 KBUILD_CFLAGS += -mno-string
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 725634f..4b50941 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -42,7 +42,7 @@ extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */

 #if !defined(__ASSEMBLY__)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 #endif

 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index bfc4e02..358ff14 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -162,14 +162,6 @@ do {						\

 #endif /* !CONFIG_HUGETLB_PAGE */

-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
-	__attribute__((__aligned__(PAGE_SIZE), \
-		__section__(".data.page_aligned")))
-#endif
-
 #define VM_DATA_DEFAULT_FLAGS \
 	(test_thread_flag(TIF_32BIT) ? \
 	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
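[Editor's note: the powerpc-private __page_aligned macro removed above is superseded by the generic helpers keyed to the new section names. A sketch of those generic definitions (cf. include/linux/linkage.h of this era; reproduced from memory, so treat as illustrative):

    #define __page_aligned_data \
            __attribute__((__section__(".data..page_aligned"), __aligned__(PAGE_SIZE)))
    #define __page_aligned_bss \
            __attribute__((__section__(".bss..page_aligned"), __aligned__(PAGE_SIZE)))
]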
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c8..8a0deef 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -223,19 +223,17 @@ SECTIONS
 #endif

 	/* The initial task and kernel stack */
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		INIT_TASK_DATA(THREAD_SIZE)
-	}
+	INIT_TASK_DATA_SECTION(THREAD_SIZE)

-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}

-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}

-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 	}
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 9b86681..24aafa6 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -14,6 +14,6 @@
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 #endif
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index c56d3f5..1f066e4 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -264,7 +264,7 @@ restore_registers:
 	lghi	%r2,0
 	br	%r14

-	.section .data.nosave,"aw",@progbits
+	.section .data..nosave,"aw",@progbits
 	.align	8
 .Ldisabled_wait_31:
 	.long  0x000a0000,0x00000000
diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr
index f02382a..862d748 100644
--- a/arch/sh/boot/compressed/vmlinux.scr
+++ b/arch/sh/boot/compressed/vmlinux.scr
@@ -1,6 +1,6 @@
 SECTIONS
 {
-  .rodata.compressed : {
+  .rodata..compressed : {
 	input_len = .;
 	LONG(input_data_end - input_data) input_data = .;
 	*(.data)
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index e461d67..ef9e555 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -14,7 +14,7 @@

 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 #ifndef __ASSEMBLY__
 struct cache_info {
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
index bbf91b9..e7f2940 100644
--- a/arch/sparc/boot/btfixupprep.c
+++ b/arch/sparc/boot/btfixupprep.c
@@ -325,7 +325,7 @@ main1:
 		(*rr)->next = NULL;
 	}
 	printf("! Generated by btfixupprep. Do not edit.\n\n");
-	printf("\t.section\t\".data.init\",#alloc,#write\n\t.align\t4\n\n");
+	printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n");
 	printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
 	for (i = 0; i < last; i++) {
 		f = array + i;
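[Editor's note: powerpc's hand-rolled .data.init_task output section is replaced above by the generic INIT_TASK_DATA_SECTION macro. Its expansion, as I recall it from include/asm-generic/vmlinux.lds.h of this period (treat as a sketch, not the verbatim header):

    #define INIT_TASK_DATA_SECTION(align)                                   \
            . = ALIGN(align);                                               \
            .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {   \
                    INIT_TASK_DATA(align)                                   \
            }
]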
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 78b0700..0588b8c 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -21,7 +21,7 @@

 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 #ifdef CONFIG_SPARC32
 #include <asm/asi.h>
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 7fcad58..6926801 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -94,7 +94,7 @@ SECTIONS
   .data           : {
     INIT_TASK_DATA(KERNEL_STACK_SIZE)
     . = ALIGN(KERNEL_STACK_SIZE);
-    *(.data.init_irqstack)
+    *(.data..init_irqstack)
     DATA_DATA
     *(.data.* .gnu.linkonce.d.*)
     SORT(CONSTRUCTORS)
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index 8aa77b6..ddc9698 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -34,5 +34,5 @@ union thread_union init_thread_union __init_task_data =
 	{ INIT_THREAD_INFO(init_task) };

 union thread_union cpu0_irqstack
-	__attribute__((__section__(".data.init_irqstack"))) =
+	__attribute__((__section__(".data..init_irqstack"))) =
 		{ INIT_THREAD_INFO(init_task) };
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index e7a6cca..ec63785 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -50,7 +50,7 @@ SECTIONS
   {
     INIT_TASK_DATA(KERNEL_STACK_SIZE)
     . = ALIGN(KERNEL_STACK_SIZE);
-    *(.data.init_irqstack)
+    *(.data..init_irqstack)
     DATA_DATA
     *(.gnu.linkonce.d*)
     CONSTRUCTORS
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
new file mode 100644
index 0000000..0280790
--- /dev/null
+++ b/arch/x86/.gitignore
@@ -0,0 +1,3 @@
+boot/compressed/vmlinux
+tools/test_get_len
+
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index bcbd36c..5c22812 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -77,7 +77,7 @@ int main(int argc, char *argv[])
 	offs += 32*1024 + 18;	/* Add 32K + 18 bytes slack */
 	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */

-	printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
 	printf(".globl z_input_len\n");
 	printf("z_input_len = %lu\n", ilen);
 	printf(".globl z_output_len\n");
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index a6f1a59..5ddabce 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -26,8 +26,8 @@ SECTIONS
 		HEAD_TEXT
 		_ehead = . ;
 	}
-	.rodata.compressed : {
-		*(.rodata.compressed)
+	.rodata..compressed : {
+		*(.rodata..compressed)
 	}
 	.text :	{
 		_text = .; 	/* Text */
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047c..48f99f1 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 8ded418..13ab7205 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
-	.section .text.page_aligned
+	.section .text..page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb..43e9ccf 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a867940..cf59276 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -247,7 +247,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
 	/*
-	 * Up to this point, the boot CPU has been using .data.init
+	 * Up to this point, the boot CPU has been using .init.data
 	 * area.  Reload any changed state for the boot CPU.
 	 */
 	if (cpu == boot_cpu_id)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2cc2497..d0bb522 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,7 +97,7 @@ SECTIONS
 		HEAD_TEXT
 #ifdef CONFIG_X86_32
 		. = ALIGN(PAGE_SIZE);
-		*(.text.page_aligned)
+		*(.text..page_aligned)
 #endif
 		. = ALIGN(8);
 		_stext = .;
@@ -305,7 +305,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 		__bss_start = .;
-		*(.bss.page_aligned)
+		*(.bss..page_aligned)
 		*(.bss)
 		. = ALIGN(4);
 		__bss_stop = .;
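[Editor's note: for completeness, a usage sketch of data that would land in the renamed .bss..page_aligned block matched by the x86 linker script above, via the generic helper sketched earlier (the variable name is hypothetical):

    #include <linux/linkage.h>
    #include <asm/page_types.h>

    /* Zero-initialized, PAGE_SIZE-aligned, grouped at the head of .bss. */
    static char example_zero_page[PAGE_SIZE] __page_aligned_bss;
]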