Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--	arch/m32r/kernel/entry.S	7
-rw-r--r--	arch/m32r/kernel/head.S	4
-rw-r--r--	arch/m32r/kernel/init_task.c	5
-rw-r--r--	arch/m32r/kernel/m32r_ksyms.c	6
-rw-r--r--	arch/m32r/kernel/smp.c	31
-rw-r--r--	arch/m32r/kernel/smpboot.c	2
-rw-r--r--	arch/m32r/kernel/time.c	9
-rw-r--r--	arch/m32r/kernel/traps.c	4
-rw-r--r--	arch/m32r/kernel/vmlinux.lds.S	78
9 files changed, 48 insertions, 98 deletions
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b..4038698 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
 #define resume_kernel		restore_all
 #endif
 
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
 ENTRY(ret_from_fork)
 	pop	r0
 	bl	schedule_tail
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a71944..a46652d 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
 /*------------------------------------------------------------------------
  * Stack area
  */
-	.section .spi
+	.section .init.data, "aw"
 	ALIGN
 	.global spi_stack_top
 	.zero	1024
 spi_stack_top:
 
-	.section .spu
+	.section .init.data, "aw"
 	ALIGN
 	.global spu_stack_top
 	.zero	1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5..6c42d5f 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b5..7005707 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d..8a88f1f 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
 
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 655ea1c..e034844 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index ba61c4c..e7fee0f1 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif /* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e5..fbd1090 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-	eit_vector[190] = 0;
+	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[191] = 0;
 #endif
 	_flush_cache_copyback_all();
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index de5e21c..8ceb618 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(m32r)
 #if defined(__LITTLE_ENDIAN__)
@@ -40,83 +41,22 @@ SECTIONS
 #endif
 	_etext = .;			/* End of text section */
 
-	. = ALIGN(16);			/* Exception table */
-	__start___ex_table = .;
-	__ex_table : { *(__ex_table) }
-	__stop___ex_table = .;
-
+	EXCEPTION_TABLE(16)
 	RODATA
-
-	/* writeable */
-	.data : {			/* Data */
-		*(.spu)
-		*(.spi)
-		DATA_DATA
-		CONSTRUCTORS
-	}
-
-	. = ALIGN(4096);
-	__nosave_begin = .;
-	.data_nosave : { *(.data.nosave) }
-	. = ALIGN(4096);
-	__nosave_end = .;
-
-	. = ALIGN(32);
-	.data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+	RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
 	_edata = .;			/* End of data section */
 
-	. = ALIGN(8192);		/* init_task */
-	.data.init_task : { *(.data.init_task) }
-
 	/* will be freed after init */
-	. = ALIGN(4096);		/* Init code and data */
+	. = ALIGN(PAGE_SIZE);		/* Init code and data */
 	__init_begin = .;
-	.init.text : {
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	}
-	.init.data : { INIT_DATA }
-	. = ALIGN(16);
-	__setup_start = .;
-	.init.setup : { *(.init.setup) }
-	__setup_end = .;
-	__initcall_start = .;
-	.initcall.init : {
-		INITCALLS
-	}
-	__initcall_end = .;
-	__con_initcall_start = .;
-	.con_initcall.init : { *(.con_initcall.init) }
-	__con_initcall_end = .;
-	SECURITY_INIT
-	. = ALIGN(4);
-	__alt_instructions = .;
-	.altinstructions : { *(.altinstructions) }
-	__alt_instructions_end = .;
-	.altinstr_replacement : { *(.altinstr_replacement) }
-	/* .exit.text is discard at runtime, not link time, to deal with references
-	   from .altinstructions and .eh_frame */
-	.exit.text : { EXIT_TEXT }
-	.exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(4096);
-	__initramfs_start = .;
-	.init.ramfs : { *(.init.ramfs) }
-	__initramfs_end = .;
-#endif
-
-	PERCPU(4096)
-	. = ALIGN(4096);
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	PERCPU(PAGE_SIZE)
+	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
 
-	__bss_start = .;		/* BSS */
-	.bss : { *(.bss) }
-	. = ALIGN(4);
-	__bss_stop = .;
+	BSS_SECTION(0, 0, 4)
 
 	_end = . ;