From 099f53cb50e45ef617a9f1d63ceec799e489418b Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 8 Apr 2009 14:28:37 -0700
Subject: async_tx: rename zero_sum to val

'zero_sum' does not properly describe the operation of generating parity
and checking that it validates against an existing buffer.  Change the name
of the operation to 'val' (for 'validate').  This is in anticipation of the
p+q case, where it is a requirement to identify the target parity buffers
separately from the source buffers, because the target parity buffers will
not have corresponding pq coefficients.

Reviewed-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 arch/arm/mach-iop13xx/setup.c | 8 ++++----
 arch/arm/plat-iop/adma.c      | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index cfd4d2e..9800228 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -478,7 +478,7 @@ void __init iop13xx_platform_init(void)
 	dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 	dma_cap_set(DMA_XOR, plat_data->cap_mask);
 	dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+	dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -490,7 +490,7 @@ void __init iop13xx_platform_init(void)
 	dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 	dma_cap_set(DMA_XOR, plat_data->cap_mask);
 	dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+	dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -502,13 +502,13 @@ void __init iop13xx_platform_init(void)
 	dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 	dma_cap_set(DMA_XOR, plat_data->cap_mask);
 	dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+	dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 	dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 	dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
 	dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
-	dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
+	dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
 	break;
 	}
 }

diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index f724208..c040044 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -198,7 +198,7 @@ static int __init iop3xx_adma_cap_init(void)
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #else
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask);
+	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #endif
--
cgit v1.1

From 6bc9a3966f0395419b09b2ec90f89f7f00341b37 Mon Sep 17 00:00:00 2001
From: Chen Liqin
Date: Fri, 12 Jun 2009 22:01:00 +0800
Subject: score: Add support for Sunplus S+core architecture

This is the complete set of files for the new Score architecture in Linux.
The Score instruction set supports 16-bit, 32-bit and 64-bit instructions;
Score SoCs have been used in game machines and LCD TVs.
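Background note (not part of either commit): the first patch above renames the dmaengine XOR-validate capability flag from DMA_ZERO_SUM to DMA_XOR_VAL. The following is a minimal sketch, assuming the 2.6.30-era dmaengine client API, of how a driver might request a channel that advertises the renamed capability; the helper name request_xor_val_chan() is hypothetical.

#include <linux/dmaengine.h>

/* Sketch only: grab any channel whose device advertises XOR-validate. */
static struct dma_chan *request_xor_val_chan(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_XOR_VAL, mask);	/* was DMA_ZERO_SUM before this rename */

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		pr_info("no DMA_XOR_VAL capable channel available\n");

	return chan;	/* caller releases it with dma_release_channel() */
}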
Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/Kconfig | 141 ++++++ arch/score/Kconfig.debug | 37 ++ arch/score/Makefile | 43 ++ arch/score/boot/Makefile | 15 + arch/score/configs/spct6600_defconfig | 717 +++++++++++++++++++++++++++++ arch/score/include/asm/Kbuild | 3 + arch/score/include/asm/asmmacro.h | 161 +++++++ arch/score/include/asm/atomic.h | 6 + arch/score/include/asm/auxvec.h | 4 + arch/score/include/asm/bitops.h | 16 + arch/score/include/asm/bitsperlong.h | 6 + arch/score/include/asm/bug.h | 6 + arch/score/include/asm/bugs.h | 6 + arch/score/include/asm/byteorder.h | 6 + arch/score/include/asm/cache.h | 7 + arch/score/include/asm/cacheflush.h | 47 ++ arch/score/include/asm/checksum.h | 235 ++++++++++ arch/score/include/asm/cputime.h | 6 + arch/score/include/asm/current.h | 6 + arch/score/include/asm/delay.h | 21 + arch/score/include/asm/device.h | 6 + arch/score/include/asm/div64.h | 6 + arch/score/include/asm/dma-mapping.h | 6 + arch/score/include/asm/dma.h | 8 + arch/score/include/asm/elf.h | 99 ++++ arch/score/include/asm/emergency-restart.h | 6 + arch/score/include/asm/errno.h | 7 + arch/score/include/asm/fcntl.h | 6 + arch/score/include/asm/fixmap.h | 82 ++++ arch/score/include/asm/ftrace.h | 4 + arch/score/include/asm/futex.h | 6 + arch/score/include/asm/hardirq.h | 6 + arch/score/include/asm/hw_irq.h | 4 + arch/score/include/asm/io.h | 9 + arch/score/include/asm/ioctl.h | 6 + arch/score/include/asm/ioctls.h | 6 + arch/score/include/asm/ipcbuf.h | 6 + arch/score/include/asm/irq.h | 33 ++ arch/score/include/asm/irq_regs.h | 6 + arch/score/include/asm/irqflags.h | 111 +++++ arch/score/include/asm/kdebug.h | 6 + arch/score/include/asm/kmap_types.h | 6 + arch/score/include/asm/linkage.h | 4 + arch/score/include/asm/local.h | 6 + arch/score/include/asm/mman.h | 6 + arch/score/include/asm/mmu.h | 6 + arch/score/include/asm/mmu_context.h | 113 +++++ arch/score/include/asm/module.h | 39 ++ arch/score/include/asm/msgbuf.h | 6 + arch/score/include/asm/mutex.h | 6 + arch/score/include/asm/page.h | 92 ++++ arch/score/include/asm/param.h | 6 + arch/score/include/asm/pci.h | 4 + arch/score/include/asm/percpu.h | 6 + arch/score/include/asm/pgalloc.h | 83 ++++ arch/score/include/asm/pgtable-bits.h | 23 + arch/score/include/asm/pgtable.h | 267 +++++++++++ arch/score/include/asm/poll.h | 6 + arch/score/include/asm/posix_types.h | 6 + arch/score/include/asm/processor.h | 106 +++++ arch/score/include/asm/ptrace.h | 87 ++++ arch/score/include/asm/resource.h | 6 + arch/score/include/asm/scatterlist.h | 6 + arch/score/include/asm/scoreregs.h | 51 ++ arch/score/include/asm/sections.h | 6 + arch/score/include/asm/segment.h | 21 + arch/score/include/asm/sembuf.h | 6 + arch/score/include/asm/setup.h | 40 ++ arch/score/include/asm/shmbuf.h | 6 + arch/score/include/asm/shmparam.h | 6 + arch/score/include/asm/sigcontext.h | 22 + arch/score/include/asm/siginfo.h | 6 + arch/score/include/asm/signal.h | 6 + arch/score/include/asm/socket.h | 6 + arch/score/include/asm/sockios.h | 6 + arch/score/include/asm/stat.h | 6 + arch/score/include/asm/statfs.h | 6 + arch/score/include/asm/string.h | 8 + arch/score/include/asm/swab.h | 6 + arch/score/include/asm/syscalls.h | 9 + arch/score/include/asm/system.h | 90 ++++ arch/score/include/asm/termbits.h | 6 + arch/score/include/asm/termios.h | 6 + arch/score/include/asm/thread_info.h | 103 +++++ arch/score/include/asm/timex.h | 8 + arch/score/include/asm/tlb.h | 17 + arch/score/include/asm/tlbflush.h | 142 ++++++ 
arch/score/include/asm/topology.h | 6 + arch/score/include/asm/types.h | 6 + arch/score/include/asm/uaccess.h | 27 ++ arch/score/include/asm/unaligned.h | 6 + arch/score/include/asm/unistd.h | 8 + arch/score/include/asm/user.h | 4 + arch/score/kernel/Makefile | 10 + arch/score/kernel/asm-offsets.c | 216 +++++++++ arch/score/kernel/entry.S | 542 ++++++++++++++++++++++ arch/score/kernel/head.S | 70 +++ arch/score/kernel/init_task.c | 49 ++ arch/score/kernel/irq.c | 135 ++++++ arch/score/kernel/module.c | 164 +++++++ arch/score/kernel/process.c | 165 +++++++ arch/score/kernel/ptrace.c | 465 +++++++++++++++++++ arch/score/kernel/setup.c | 157 +++++++ arch/score/kernel/signal.c | 355 ++++++++++++++ arch/score/kernel/sys_score.c | 147 ++++++ arch/score/kernel/time.c | 99 ++++ arch/score/kernel/traps.c | 349 ++++++++++++++ arch/score/kernel/vmlinux.lds.S | 148 ++++++ arch/score/lib/Makefile | 8 + arch/score/lib/ashldi3.c | 46 ++ arch/score/lib/ashrdi3.c | 48 ++ arch/score/lib/checksum.S | 255 ++++++++++ arch/score/lib/checksum_copy.c | 52 +++ arch/score/lib/cmpdi2.c | 44 ++ arch/score/lib/libgcc.h | 37 ++ arch/score/lib/lshrdi3.c | 47 ++ arch/score/lib/string.S | 196 ++++++++ arch/score/lib/ucmpdi2.c | 38 ++ arch/score/mm/Makefile | 6 + arch/score/mm/cache.c | 308 +++++++++++++ arch/score/mm/extable.c | 38 ++ arch/score/mm/fault.c | 235 ++++++++++ arch/score/mm/init.c | 173 +++++++ arch/score/mm/pgtable.c | 60 +++ arch/score/mm/tlb-miss.S | 199 ++++++++ arch/score/mm/tlb-score.c | 251 ++++++++++ 126 files changed, 8566 insertions(+) create mode 100644 arch/score/Kconfig create mode 100644 arch/score/Kconfig.debug create mode 100644 arch/score/Makefile create mode 100644 arch/score/boot/Makefile create mode 100644 arch/score/configs/spct6600_defconfig create mode 100644 arch/score/include/asm/Kbuild create mode 100644 arch/score/include/asm/asmmacro.h create mode 100644 arch/score/include/asm/atomic.h create mode 100644 arch/score/include/asm/auxvec.h create mode 100644 arch/score/include/asm/bitops.h create mode 100644 arch/score/include/asm/bitsperlong.h create mode 100644 arch/score/include/asm/bug.h create mode 100644 arch/score/include/asm/bugs.h create mode 100644 arch/score/include/asm/byteorder.h create mode 100644 arch/score/include/asm/cache.h create mode 100644 arch/score/include/asm/cacheflush.h create mode 100644 arch/score/include/asm/checksum.h create mode 100644 arch/score/include/asm/cputime.h create mode 100644 arch/score/include/asm/current.h create mode 100644 arch/score/include/asm/delay.h create mode 100644 arch/score/include/asm/device.h create mode 100644 arch/score/include/asm/div64.h create mode 100644 arch/score/include/asm/dma-mapping.h create mode 100644 arch/score/include/asm/dma.h create mode 100644 arch/score/include/asm/elf.h create mode 100644 arch/score/include/asm/emergency-restart.h create mode 100644 arch/score/include/asm/errno.h create mode 100644 arch/score/include/asm/fcntl.h create mode 100644 arch/score/include/asm/fixmap.h create mode 100644 arch/score/include/asm/ftrace.h create mode 100644 arch/score/include/asm/futex.h create mode 100644 arch/score/include/asm/hardirq.h create mode 100644 arch/score/include/asm/hw_irq.h create mode 100644 arch/score/include/asm/io.h create mode 100644 arch/score/include/asm/ioctl.h create mode 100644 arch/score/include/asm/ioctls.h create mode 100644 arch/score/include/asm/ipcbuf.h create mode 100644 arch/score/include/asm/irq.h create mode 100644 arch/score/include/asm/irq_regs.h create mode 100644 
arch/score/include/asm/irqflags.h create mode 100644 arch/score/include/asm/kdebug.h create mode 100644 arch/score/include/asm/kmap_types.h create mode 100644 arch/score/include/asm/linkage.h create mode 100644 arch/score/include/asm/local.h create mode 100644 arch/score/include/asm/mman.h create mode 100644 arch/score/include/asm/mmu.h create mode 100644 arch/score/include/asm/mmu_context.h create mode 100644 arch/score/include/asm/module.h create mode 100644 arch/score/include/asm/msgbuf.h create mode 100644 arch/score/include/asm/mutex.h create mode 100644 arch/score/include/asm/page.h create mode 100644 arch/score/include/asm/param.h create mode 100644 arch/score/include/asm/pci.h create mode 100644 arch/score/include/asm/percpu.h create mode 100644 arch/score/include/asm/pgalloc.h create mode 100644 arch/score/include/asm/pgtable-bits.h create mode 100644 arch/score/include/asm/pgtable.h create mode 100644 arch/score/include/asm/poll.h create mode 100644 arch/score/include/asm/posix_types.h create mode 100644 arch/score/include/asm/processor.h create mode 100644 arch/score/include/asm/ptrace.h create mode 100644 arch/score/include/asm/resource.h create mode 100644 arch/score/include/asm/scatterlist.h create mode 100644 arch/score/include/asm/scoreregs.h create mode 100644 arch/score/include/asm/sections.h create mode 100644 arch/score/include/asm/segment.h create mode 100644 arch/score/include/asm/sembuf.h create mode 100644 arch/score/include/asm/setup.h create mode 100644 arch/score/include/asm/shmbuf.h create mode 100644 arch/score/include/asm/shmparam.h create mode 100644 arch/score/include/asm/sigcontext.h create mode 100644 arch/score/include/asm/siginfo.h create mode 100644 arch/score/include/asm/signal.h create mode 100644 arch/score/include/asm/socket.h create mode 100644 arch/score/include/asm/sockios.h create mode 100644 arch/score/include/asm/stat.h create mode 100644 arch/score/include/asm/statfs.h create mode 100644 arch/score/include/asm/string.h create mode 100644 arch/score/include/asm/swab.h create mode 100644 arch/score/include/asm/syscalls.h create mode 100644 arch/score/include/asm/system.h create mode 100644 arch/score/include/asm/termbits.h create mode 100644 arch/score/include/asm/termios.h create mode 100644 arch/score/include/asm/thread_info.h create mode 100644 arch/score/include/asm/timex.h create mode 100644 arch/score/include/asm/tlb.h create mode 100644 arch/score/include/asm/tlbflush.h create mode 100644 arch/score/include/asm/topology.h create mode 100644 arch/score/include/asm/types.h create mode 100644 arch/score/include/asm/uaccess.h create mode 100644 arch/score/include/asm/unaligned.h create mode 100644 arch/score/include/asm/unistd.h create mode 100644 arch/score/include/asm/user.h create mode 100644 arch/score/kernel/Makefile create mode 100644 arch/score/kernel/asm-offsets.c create mode 100644 arch/score/kernel/entry.S create mode 100644 arch/score/kernel/head.S create mode 100644 arch/score/kernel/init_task.c create mode 100644 arch/score/kernel/irq.c create mode 100644 arch/score/kernel/module.c create mode 100644 arch/score/kernel/process.c create mode 100644 arch/score/kernel/ptrace.c create mode 100644 arch/score/kernel/setup.c create mode 100644 arch/score/kernel/signal.c create mode 100644 arch/score/kernel/sys_score.c create mode 100644 arch/score/kernel/time.c create mode 100644 arch/score/kernel/traps.c create mode 100644 arch/score/kernel/vmlinux.lds.S create mode 100644 arch/score/lib/Makefile create mode 100644 
arch/score/lib/ashldi3.c create mode 100644 arch/score/lib/ashrdi3.c create mode 100644 arch/score/lib/checksum.S create mode 100644 arch/score/lib/checksum_copy.c create mode 100644 arch/score/lib/cmpdi2.c create mode 100644 arch/score/lib/libgcc.h create mode 100644 arch/score/lib/lshrdi3.c create mode 100644 arch/score/lib/string.S create mode 100644 arch/score/lib/ucmpdi2.c create mode 100644 arch/score/mm/Makefile create mode 100644 arch/score/mm/cache.c create mode 100644 arch/score/mm/extable.c create mode 100644 arch/score/mm/fault.c create mode 100644 arch/score/mm/init.c create mode 100644 arch/score/mm/pgtable.c create mode 100644 arch/score/mm/tlb-miss.S create mode 100644 arch/score/mm/tlb-score.c (limited to 'arch') diff --git a/arch/score/Kconfig b/arch/score/Kconfig new file mode 100644 index 0000000..55d413e --- /dev/null +++ b/arch/score/Kconfig @@ -0,0 +1,141 @@ +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. + +mainmenu "Linux/SCORE Kernel Configuration" + +menu "Machine selection" + +choice + prompt "System type" + default MACH_SPCT6600 + +config ARCH_SCORE7 + bool "SCORE7 processor" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP + +config MACH_SPCT6600 + bool "SPCT6600 series based machines" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP + +config SCORE_SIM + bool "Score simulator" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP +endchoice + +endmenu + +config CPU_SCORE7 + bool + +config GENERIC_IOMAP + def_bool y + +config NO_DMA + bool + default y + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config GENERIC_FIND_NEXT_BIT + def_bool y + +config GENERIC_HWEIGHT + def_bool y + +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_CLOCKEVENTS + def_bool y + +config GENERIC_TIME + def_bool y + +config SCHED_NO_NO_OMIT_FRAME_POINTER + def_bool y + +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + +config GENERIC_SYSCALL_TABLE + def_bool y + +config SCORE_L1_CACHE_SHIFT + int + default "4" + +menu "Kernel type" + +config 32BIT + def_bool y + +config GENERIC_HARDIRQS + def_bool y + +config ARCH_FLATMEM_ENABLE + def_bool y + +config ARCH_POPULATES_NODE_MAP + def_bool y + +source "mm/Kconfig" + +config MEMORY_START + hex + default 0xa0000000 + +source "kernel/time/Kconfig" +source "kernel/Kconfig.hz" +source "kernel/Kconfig.preempt" + +endmenu + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +source "init/Kconfig" + +config PROBE_INITRD_HEADER + bool "Probe initrd header created by addinitrd" + depends on BLK_DEV_INITRD + help + Probe initrd header at the last page of kernel image. + Say Y here if you are using arch/score/boot/addinitrd.c to + add initrd or initramfs image to the kernel image. + Otherwise, say N. 
+ +config MMU + def_bool y + +menu "Executable file formats" + +source "fs/Kconfig.binfmt" + +endmenu + +source "net/Kconfig" + +source "drivers/Kconfig" + +source "fs/Kconfig" + +source "arch/score/Kconfig.debug" + +source "security/Kconfig" + +source "crypto/Kconfig" + +source "lib/Kconfig" diff --git a/arch/score/Kconfig.debug b/arch/score/Kconfig.debug new file mode 100644 index 0000000..451ed54 --- /dev/null +++ b/arch/score/Kconfig.debug @@ -0,0 +1,37 @@ +menu "Kernel hacking" + +config TRACE_IRQFLAGS_SUPPORT + bool + default y + +source "lib/Kconfig.debug" + +config CMDLINE + string "Default kernel command string" + default "" + help + On some platforms, there is currently no way for the boot loader to + pass arguments to the kernel. For these platforms, you can supply + some command-line options at build time by entering them here. In + other cases you can specify kernel args so that you don't have + to set them up in board prom initialization routines. + +config DEBUG_STACK_USAGE + bool "Enable stack utilization instrumentation" + depends on DEBUG_KERNEL + help + Enables the display of the minimum amount of free stack which each + task has ever had available in the sysrq-T and sysrq-P debug output. + + This option will slow down process creation somewhat. + +config RUNTIME_DEBUG + bool "Enable run-time debugging" + depends on DEBUG_KERNEL + help + If you say Y here, some debugging macros will do run-time checking. + If you say N here, those macros will mostly turn to no-ops. See + include/asm-score/debug.h for debuging macros. + If unsure, say N. + +endmenu diff --git a/arch/score/Makefile b/arch/score/Makefile new file mode 100644 index 0000000..68e0cd0 --- /dev/null +++ b/arch/score/Makefile @@ -0,0 +1,43 @@ +# +# arch/score/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# + +KBUILD_DEFCONFIG := spct6600_defconfig +CROSS_COMPILE := score-linux- + +# +# CPU-dependent compiler/assembler options for optimization. +# +cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \ + -D__linux__ -ffunction-sections -ffreestanding + +# +# Board-dependent options and extra files +# +KBUILD_AFLAGS += $(cflags-y) +KBUILD_CFLAGS += $(cflags-y) +MODFLAGS += -mlong-calls +LDFLAGS += --oformat elf32-littlescore +LDFLAGS_vmlinux += -G0 -static -nostdlib + +head-y := arch/score/kernel/head.o +libs-y += arch/score/lib/ +core-y += arch/score/kernel/ arch/score/mm/ + +boot := arch/score/boot + +vmlinux.bin: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + @$(MAKE) $(clean)=$(boot) + +define archhelp + echo ' vmlinux.bin - Raw binary boot image' + echo + echo ' These will be default as apropriate for a configured platform.' +endef diff --git a/arch/score/boot/Makefile b/arch/score/boot/Makefile new file mode 100644 index 0000000..0c5fbd0 --- /dev/null +++ b/arch/score/boot/Makefile @@ -0,0 +1,15 @@ +# +# arch/score/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# + +targets := vmlinux.bin + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' + +clean-files += vmlinux.bin diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig new file mode 100644 index 0000000..e064943 --- /dev/null +++ b/arch/score/configs/spct6600_defconfig @@ -0,0 +1,717 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.30-rc5 +# Fri Jun 12 18:57:07 2009 +# + +# +# Machine selection +# +# CONFIG_ARCH_SCORE7 is not set +CONFIG_MACH_SPCT6600=y +# CONFIG_SCORE_SIM is not set +CONFIG_CPU_SCORE7=y +CONFIG_GENERIC_IOMAP=y +CONFIG_NO_DMA=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_TIME=y +CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_SYSCALL_TABLE=y +CONFIG_SCORE_L1_CACHE_SHIFT=4 + +# +# Kernel type +# +CONFIG_32BIT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_UNEVICTABLE_LRU=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_MEMORY_START=0xa0000000 +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_HZ_100=y +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=100 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=12 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_KALLSYMS is not set +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_HOTPLUG is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is 
not set +# CONFIG_SLOW_WORK is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_PROBE_INITRD_HEADER is not set +CONFIG_MMU=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=y +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +CONFIG_ARPD=y +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MISC_DEVICES is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# 
CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +CONFIG_COMPAT_NET_DEV_OPS=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +# CONFIG_NET_ETHERNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_N_HDLC is not set +# CONFIG_RISCOM8 is not set +# CONFIG_SPECIALIX is not set +# CONFIG_RIO is not set +CONFIG_STALDRV=y + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_R3964 is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# 
+CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +# CONFIG_EXT2_FS_SECURITY is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +CONFIG_AUTOFS_FS=y +CONFIG_AUTOFS4_FS=y +# CONFIG_FUSE_FS is not set +CONFIG_GENERIC_ACL=y + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFSD=y +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_NLS is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +CONFIG_TRACING_SUPPORT=y + +# +# Tracers +# +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_EVENT_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_CMDLINE="" + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_DEBUG_PROC_KEYS=y +CONFIG_SECURITY=y +# CONFIG_SECURITYFS 
is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_SECURITY_FILE_CAPABILITIES=y +CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +CONFIG_CRYPTO_SEQIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_NLATTR=y diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild new file mode 100644 index 0000000..b367abd --- /dev/null +++ b/arch/score/include/asm/Kbuild @@ -0,0 +1,3 @@ +include include/asm-generic/Kbuild.asm + +header-y += diff --git a/arch/score/include/asm/asmmacro.h b/arch/score/include/asm/asmmacro.h new file mode 100644 index 0000000..a04a54c --- /dev/null +++ b/arch/score/include/asm/asmmacro.h @@ -0,0 +1,161 @@ +#ifndef _ASM_SCORE_ASMMACRO_H +#define _ASM_SCORE_ASMMACRO_H + +#include + +#ifdef __ASSEMBLY__ + +.macro SAVE_ALL + mfcr r30, cr0 + mv r31, r0 + nop + /* if UMs == 1, change stack. 
*/ + slli.c r30, r30, 28 + bpl 1f + la r31, kernelsp + lw r31, [r31] +1: + mv r30, r0 + addri r0, r31, -PT_SIZE + + sw r30, [r0, PT_R0] + .set r1 + sw r1, [r0, PT_R1] + .set nor1 + sw r2, [r0, PT_R2] + sw r3, [r0, PT_R3] + sw r4, [r0, PT_R4] + sw r5, [r0, PT_R5] + sw r6, [r0, PT_R6] + sw r7, [r0, PT_R7] + + sw r8, [r0, PT_R8] + sw r9, [r0, PT_R9] + sw r10, [r0, PT_R10] + sw r11, [r0, PT_R11] + sw r12, [r0, PT_R12] + sw r13, [r0, PT_R13] + sw r14, [r0, PT_R14] + sw r15, [r0, PT_R15] + + sw r16, [r0, PT_R16] + sw r17, [r0, PT_R17] + sw r18, [r0, PT_R18] + sw r19, [r0, PT_R19] + sw r20, [r0, PT_R20] + sw r21, [r0, PT_R21] + sw r22, [r0, PT_R22] + sw r23, [r0, PT_R23] + + sw r24, [r0, PT_R24] + sw r25, [r0, PT_R25] + sw r25, [r0, PT_R25] + sw r26, [r0, PT_R26] + sw r27, [r0, PT_R27] + + sw r28, [r0, PT_R28] + sw r29, [r0, PT_R29] + orri r28, r0, 0x1fff + li r31, 0x00001fff + xor r28, r28, r31 + + mfcehl r30, r31 + sw r30, [r0, PT_CEH] + sw r31, [r0, PT_CEL] + + mfcr r31, cr0 + sw r31, [r0, PT_PSR] + + mfcr r31, cr1 + sw r31, [r0, PT_CONDITION] + + mfcr r31, cr2 + sw r31, [r0, PT_ECR] + + mfcr r31, cr5 + srli r31, r31, 1 + slli r31, r31, 1 + sw r31, [r0, PT_EPC] +.endm + +.macro RESTORE_ALL_AND_RET + mfcr r30, cr0 + srli r30, r30, 1 + slli r30, r30, 1 + mtcr r30, cr0 + nop + nop + nop + nop + nop + + .set r1 + ldis r1, 0x00ff + and r30, r30, r1 + not r1, r1 + lw r31, [r0, PT_PSR] + and r31, r31, r1 + .set nor1 + or r31, r31, r30 + mtcr r31, cr0 + nop + nop + nop + nop + nop + + lw r30, [r0, PT_CONDITION] + mtcr r30, cr1 + nop + nop + nop + nop + nop + + lw r30, [r0, PT_CEH] + lw r31, [r0, PT_CEL] + mtcehl r30, r31 + + .set r1 + lw r1, [r0, PT_R1] + .set nor1 + lw r2, [r0, PT_R2] + lw r3, [r0, PT_R3] + lw r4, [r0, PT_R4] + lw r5, [r0, PT_R5] + lw r6, [r0, PT_R6] + lw r7, [r0, PT_R7] + + lw r8, [r0, PT_R8] + lw r9, [r0, PT_R9] + lw r10, [r0, PT_R10] + lw r11, [r0, PT_R11] + lw r12, [r0, PT_R12] + lw r13, [r0, PT_R13] + lw r14, [r0, PT_R14] + lw r15, [r0, PT_R15] + + lw r16, [r0, PT_R16] + lw r17, [r0, PT_R17] + lw r18, [r0, PT_R18] + lw r19, [r0, PT_R19] + lw r20, [r0, PT_R20] + lw r21, [r0, PT_R21] + lw r22, [r0, PT_R22] + lw r23, [r0, PT_R23] + + lw r24, [r0, PT_R24] + lw r25, [r0, PT_R25] + lw r26, [r0, PT_R26] + lw r27, [r0, PT_R27] + lw r28, [r0, PT_R28] + lw r29, [r0, PT_R29] + + lw r30, [r0, PT_EPC] + lw r0, [r0, PT_R0] + mtcr r30, cr5 + rte +.endm + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SCORE_ASMMACRO_H */ diff --git a/arch/score/include/asm/atomic.h b/arch/score/include/asm/atomic.h new file mode 100644 index 0000000..84eb8dd --- /dev/null +++ b/arch/score/include/asm/atomic.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_ATOMIC_H +#define _ASM_SCORE_ATOMIC_H + +#include + +#endif /* _ASM_SCORE_ATOMIC_H */ diff --git a/arch/score/include/asm/auxvec.h b/arch/score/include/asm/auxvec.h new file mode 100644 index 0000000..f691515 --- /dev/null +++ b/arch/score/include/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_AUXVEC_H +#define _ASM_SCORE_AUXVEC_H + +#endif /* _ASM_SCORE_AUXVEC_H */ diff --git a/arch/score/include/asm/bitops.h b/arch/score/include/asm/bitops.h new file mode 100644 index 0000000..2763b05 --- /dev/null +++ b/arch/score/include/asm/bitops.h @@ -0,0 +1,16 @@ +#ifndef _ASM_SCORE_BITOPS_H +#define _ASM_SCORE_BITOPS_H + +#include /* swab32 */ +#include /* save_flags */ + +/* + * clear_bit() doesn't provide any barrier for the compiler. 
+ */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +#include +#include + +#endif /* _ASM_SCORE_BITOPS_H */ diff --git a/arch/score/include/asm/bitsperlong.h b/arch/score/include/asm/bitsperlong.h new file mode 100644 index 0000000..86ff337 --- /dev/null +++ b/arch/score/include/asm/bitsperlong.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BITSPERLONG_H +#define _ASM_SCORE_BITSPERLONG_H + +#include + +#endif /* _ASM_SCORE_BITSPERLONG_H */ diff --git a/arch/score/include/asm/bug.h b/arch/score/include/asm/bug.h new file mode 100644 index 0000000..bb76a33 --- /dev/null +++ b/arch/score/include/asm/bug.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BUG_H +#define _ASM_SCORE_BUG_H + +#include + +#endif /* _ASM_SCORE_BUG_H */ diff --git a/arch/score/include/asm/bugs.h b/arch/score/include/asm/bugs.h new file mode 100644 index 0000000..a062e10 --- /dev/null +++ b/arch/score/include/asm/bugs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BUGS_H +#define _ASM_SCORE_BUGS_H + +#include + +#endif /* _ASM_SCORE_BUGS_H */ diff --git a/arch/score/include/asm/byteorder.h b/arch/score/include/asm/byteorder.h new file mode 100644 index 0000000..88cbebc --- /dev/null +++ b/arch/score/include/asm/byteorder.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BYTEORDER_H +#define _ASM_SCORE_BYTEORDER_H + +#include + +#endif /* _ASM_SCORE_BYTEORDER_H */ diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h new file mode 100644 index 0000000..ae3d59f --- /dev/null +++ b/arch/score/include/asm/cache.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_CACHE_H +#define _ASM_SCORE_CACHE_H + +#define L1_CACHE_SHIFT 4 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* _ASM_SCORE_CACHE_H */ diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h new file mode 100644 index 0000000..1c74628 --- /dev/null +++ b/arch/score/include/asm/cacheflush.h @@ -0,0 +1,47 @@ +#ifndef _ASM_SCORE_CACHEFLUSH_H +#define _ASM_SCORE_CACHEFLUSH_H + +/* Keep includes the same across arches. 
*/ +#include + +extern void (*flush_cache_all)(void); +extern void (*flush_cache_mm)(struct mm_struct *mm); +extern void (*flush_cache_range)(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void (*flush_cache_page)(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +extern void (*flush_cache_sigtramp)(unsigned long addr); +extern void (*flush_icache_all)(void); +extern void (*flush_icache_range)(unsigned long start, unsigned long end); +extern void (*flush_data_cache_page)(unsigned long addr); + +extern void s7_flush_cache_all(void); + +#define flush_cache_dup_mm(mm) do {} while (0) +#define flush_dcache_page(page) do {} while (0) +#define flush_dcache_mmap_lock(mapping) do {} while (0) +#define flush_dcache_mmap_unlock(mapping) do {} while (0) +#define flush_cache_vmap(start, end) do {} while (0) +#define flush_cache_vunmap(start, end) do {} while (0) + +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ + if (vma->vm_flags & VM_EXEC) { + void *v = page_address(page); + flush_icache_range((unsigned long) v, + (unsigned long) v + PAGE_SIZE); + } +} + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + if ((vma->vm_flags & VM_EXEC)) \ + flush_cache_page(vma, vaddr, page_to_pfn(page));\ + } while (0) + +#endif /* _ASM_SCORE_CACHEFLUSH_H */ diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h new file mode 100644 index 0000000..f909ac3 --- /dev/null +++ b/arch/score/include/asm/checksum.h @@ -0,0 +1,235 @@ +#ifndef _ASM_SCORE_CHECKSUM_H +#define _ASM_SCORE_CHECKSUM_H + +#include +#include + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const void *buff, int len, __wsum sum); +unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, + unsigned int sum, int *csum_err); +unsigned int csum_partial_copy(const char *src, char *dst, + int len, unsigned int sum); + +/* + * this is a new version of the above that records errors it finds in *errp, + * but continues and zeros the rest of the buffer. 
+ */ + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static inline +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + __wsum sum, int *err_ptr) +{ + sum = csum_partial(src, len, sum); + if (copy_to_user(dst, src, len)) { + *err_ptr = -EFAULT; + return (__force __wsum) -1; /* invalid checksum */ + } + return sum; +} + + +#define csum_partial_copy_nocheck csum_partial_copy +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum sum) +{ + /* the while loop is unnecessary really, it's always enough with two + iterations */ + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tr1\n\t" + "slli\tr1,%0, 16\n\t" + "add\t%0,%0, r1\n\t" + "cmp.c\tr1, %0\n\t" + "srli\t%0, %0, 16\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:ldi\tr30, 0xffff\n\t" + "xor\t%0, %0, r30\n\t" + "slli\t%0, %0, 16\n\t" + "srli\t%0, %0, 16\n\t" + ".set\tnor1\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (sum)); + return sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + * + * By Jorge Cwik , adapted for linux by + * Arnt Gulbrandsen. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int sum; + unsigned long dummy; + + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tnor1\n\t" + "lw\t%0, [%1]\n\t" + "subri\t%2, %2, 4\n\t" + "slli\t%2, %2, 2\n\t" + "lw\t%3, [%1, 4]\n\t" + "add\t%2, %2, %1\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 8]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 12]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n" + + "1:\tlw\t%3, [%1, 16]\n\t" + "addi\t%1, 4\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t2f\n\t" + "addi\t%0, 0x1\n" + "2:cmp.c\t%2, %1\n\t" + "bne\t1b\n\t" + + ".set\tr1\n\t" + ".set optimize\n\t" + : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy) + : "1" (iph), "2" (ihl)); + + return csum_fold(sum); +} + +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + unsigned long tmp = (ntohs(len) << 16) + proto * 256; + __asm__ __volatile__( + ".set volatile\n\t" + "add\t%0, %0, %2\n\t" + "cmp.c\t%2, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %4\n\t" + "cmp.c\t%4, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (daddr), "r"(saddr), + "r" (tmp), + "r" (sum)); + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +static inline unsigned short ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + __asm__ __volatile__( + 
".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" + ".set\tnoat\n\t" + "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" + "sltu\t$1, %0, %5\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %6\t\t\t# csum\n\t" + "sltu\t$1, %0, %6\n\t" + "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 4(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 8(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 12(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 0(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 4(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 8(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 12(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "addu\t%0, $1\t\t\t# Add final carry\n\t" + ".set\tnoat\n\t" + ".set\tnoreorder" + : "=r" (sum), "=r" (proto) + : "r" (saddr), "r" (daddr), + "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); + + return csum_fold(sum); +} +#endif /* _ASM_SCORE_CHECKSUM_H */ diff --git a/arch/score/include/asm/cputime.h b/arch/score/include/asm/cputime.h new file mode 100644 index 0000000..1fced99 --- /dev/null +++ b/arch/score/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_CPUTIME_H +#define _ASM_SCORE_CPUTIME_H + +#include + +#endif /* _ASM_SCORE_CPUTIME_H */ diff --git a/arch/score/include/asm/current.h b/arch/score/include/asm/current.h new file mode 100644 index 0000000..16eae9c --- /dev/null +++ b/arch/score/include/asm/current.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_CURRENT_H +#define _ASM_SCORE_CURRENT_H + +#include + +#endif /* _ASM_SCORE_CURRENT_H */ diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h new file mode 100644 index 0000000..ad716f6 --- /dev/null +++ b/arch/score/include/asm/delay.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_DELAY_H +#define _ASM_SCORE_DELAY_H + +static inline void __delay(unsigned long loops) +{ + __asm__ __volatile__ ( + "1:\tsubi\t%0,1\n\t" + "cmpz.c\t%0\n\t" + "bne\t1b\n\t" + : "=r" (loops) + : "0" (loops)); +} + +static inline void __udelay(unsigned long usecs) +{ + __delay(usecs); +} + +#define udelay(usecs) __udelay(usecs) + +#endif /* _ASM_SCORE_DELAY_H */ diff --git a/arch/score/include/asm/device.h b/arch/score/include/asm/device.h new file mode 100644 index 0000000..2dc7cc5 --- /dev/null +++ b/arch/score/include/asm/device.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DEVICE_H +#define _ASM_SCORE_DEVICE_H + +#include + +#endif /* _ASM_SCORE_DEVICE_H */ diff --git a/arch/score/include/asm/div64.h b/arch/score/include/asm/div64.h new file mode 100644 index 0000000..75fae19 --- /dev/null +++ b/arch/score/include/asm/div64.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DIV64_H +#define _ASM_SCORE_DIV64_H + +#include + +#endif /* _ASM_SCORE_DIV64_H */ diff --git a/arch/score/include/asm/dma-mapping.h b/arch/score/include/asm/dma-mapping.h new file mode 100644 index 0000000..f9c0193 --- /dev/null +++ b/arch/score/include/asm/dma-mapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DMA_MAPPING_H +#define _ASM_SCORE_DMA_MAPPING_H + +#include + +#endif /* _ASM_SCORE_DMA_MAPPING_H */ diff --git a/arch/score/include/asm/dma.h b/arch/score/include/asm/dma.h new file mode 100644 index 0000000..9f44185 --- /dev/null +++ b/arch/score/include/asm/dma.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_DMA_H +#define 
_ASM_SCORE_DMA_H + +#include + +#define MAX_DMA_ADDRESS (0) + +#endif /* _ASM_SCORE_DMA_H */ diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h new file mode 100644 index 0000000..8324363 --- /dev/null +++ b/arch/score/include/asm/elf.h @@ -0,0 +1,99 @@ +#ifndef _ASM_SCORE_ELF_H +#define _ASM_SCORE_ELF_H + +/* ELF register definitions */ +#define ELF_NGREG 45 +#define ELF_NFPREG 33 +#define EM_SCORE7 135 + +/* Relocation types. */ +#define R_SCORE_NONE 0 +#define R_SCORE_HI16 1 +#define R_SCORE_LO16 2 +#define R_SCORE_BCMP 3 +#define R_SCORE_24 4 +#define R_SCORE_PC19 5 +#define R_SCORE16_11 6 +#define R_SCORE16_PC8 7 +#define R_SCORE_ABS32 8 +#define R_SCORE_ABS16 9 +#define R_SCORE_DUMMY2 10 +#define R_SCORE_GP15 11 +#define R_SCORE_GNU_VTINHERIT 12 +#define R_SCORE_GNU_VTENTRY 13 +#define R_SCORE_GOT15 14 +#define R_SCORE_GOT_LO16 15 +#define R_SCORE_CALL15 16 +#define R_SCORE_GPREL32 17 +#define R_SCORE_REL32 18 +#define R_SCORE_DUMMY_HI16 19 +#define R_SCORE_IMM30 20 +#define R_SCORE_IMM32 21 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +#define elf_check_arch(x) ((x)->e_machine == EM_SCORE7) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SCORE7 + +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX); \ +} while (0) + +struct task_struct; +struct pt_regs; + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +#define ELF_PLAT_INIT(_r, load_addr) \ +do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 
*/ + +#ifndef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#endif /* _ASM_SCORE_ELF_H */ diff --git a/arch/score/include/asm/emergency-restart.h b/arch/score/include/asm/emergency-restart.h new file mode 100644 index 0000000..ca31e98 --- /dev/null +++ b/arch/score/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_EMERGENCY_RESTART_H +#define _ASM_SCORE_EMERGENCY_RESTART_H + +#include + +#endif /* _ASM_SCORE_EMERGENCY_RESTART_H */ diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h new file mode 100644 index 0000000..7cd3e1f --- /dev/null +++ b/arch/score/include/asm/errno.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_ERRNO_H +#define _ASM_SCORE_ERRNO_H + +#include +#define EMAXERRNO 1024 + +#endif /* _ASM_SCORE_ERRNO_H */ diff --git a/arch/score/include/asm/fcntl.h b/arch/score/include/asm/fcntl.h new file mode 100644 index 0000000..03968a3 --- /dev/null +++ b/arch/score/include/asm/fcntl.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_FCNTL_H +#define _ASM_SCORE_FCNTL_H + +#include + +#endif /* _ASM_SCORE_FCNTL_H */ diff --git a/arch/score/include/asm/fixmap.h b/arch/score/include/asm/fixmap.h new file mode 100644 index 0000000..ee16766 --- /dev/null +++ b/arch/score/include/asm/fixmap.h @@ -0,0 +1,82 @@ +#ifndef _ASM_SCORE_FIXMAP_H +#define _ASM_SCORE_FIXMAP_H + +#include + +#define PHY_RAM_BASE 0x00000000 +#define PHY_IO_BASE 0x10000000 + +#define VIRTUAL_RAM_BASE 0xa0000000 +#define VIRTUAL_IO_BASE 0xb0000000 + +#define RAM_SPACE_SIZE 0x10000000 +#define IO_SPACE_SIZE 0x10000000 + +/* Kernel unmapped, cached 512MB */ +#define KSEG1 0xa0000000 + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ + +/* + * on UP currently we will have no trace of the fixmap mechanizm, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? + */ +enum fixed_addresses { +#define FIX_N_COLOURS 8 + FIX_CMAP_BEGIN, + FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + __end_of_fixed_addresses +}; + +/* + * used by vmalloc.c. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap, and leave one page empty + * at the top of mem.. + */ +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) \ + ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. 
+ */ +static inline unsigned long fix_to_virt(const unsigned int idx) +{ + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + return __virt_to_fix(vaddr); +} + +#endif /* _ASM_SCORE_FIXMAP_H */ diff --git a/arch/score/include/asm/ftrace.h b/arch/score/include/asm/ftrace.h new file mode 100644 index 0000000..79d6f10 --- /dev/null +++ b/arch/score/include/asm/ftrace.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_FTRACE_H +#define _ASM_SCORE_FTRACE_H + +#endif /* _ASM_SCORE_FTRACE_H */ diff --git a/arch/score/include/asm/futex.h b/arch/score/include/asm/futex.h new file mode 100644 index 0000000..1dca242 --- /dev/null +++ b/arch/score/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_FUTEX_H +#define _ASM_SCORE_FUTEX_H + +#include + +#endif /* _ASM_SCORE_FUTEX_H */ diff --git a/arch/score/include/asm/hardirq.h b/arch/score/include/asm/hardirq.h new file mode 100644 index 0000000..dc932c5 --- /dev/null +++ b/arch/score/include/asm/hardirq.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_HARDIRQ_H +#define _ASM_SCORE_HARDIRQ_H + +#include + +#endif /* _ASM_SCORE_HARDIRQ_H */ diff --git a/arch/score/include/asm/hw_irq.h b/arch/score/include/asm/hw_irq.h new file mode 100644 index 0000000..4caafb2 --- /dev/null +++ b/arch/score/include/asm/hw_irq.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_HW_IRQ_H +#define _ASM_SCORE_HW_IRQ_H + +#endif /* _ASM_SCORE_HW_IRQ_H */ diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h new file mode 100644 index 0000000..fbbfd71 --- /dev/null +++ b/arch/score/include/asm/io.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SCORE_IO_H +#define _ASM_SCORE_IO_H + +#include + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +#endif /* _ASM_SCORE_IO_H */ diff --git a/arch/score/include/asm/ioctl.h b/arch/score/include/asm/ioctl.h new file mode 100644 index 0000000..a351d21 --- /dev/null +++ b/arch/score/include/asm/ioctl.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTL_H +#define _ASM_SCORE_IOCTL_H + +#include + +#endif /* _ASM_SCORE_IOCTL_H */ diff --git a/arch/score/include/asm/ioctls.h b/arch/score/include/asm/ioctls.h new file mode 100644 index 0000000..ed01d2b --- /dev/null +++ b/arch/score/include/asm/ioctls.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTLS_H +#define _ASM_SCORE_IOCTLS_H + +#include + +#endif /* _ASM_SCORE_IOCTLS_H */ diff --git a/arch/score/include/asm/ipcbuf.h b/arch/score/include/asm/ipcbuf.h new file mode 100644 index 0000000..e082cef --- /dev/null +++ b/arch/score/include/asm/ipcbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IPCBUF_H +#define _ASM_SCORE_IPCBUF_H + +#include + +#endif /* _ASM_SCORE_IPCBUF_H */ diff --git a/arch/score/include/asm/irq.h b/arch/score/include/asm/irq.h new file mode 100644 index 0000000..401f670 --- /dev/null +++ b/arch/score/include/asm/irq.h @@ -0,0 +1,33 @@ +#ifndef _ASM_SCORE_IRQ_H +#define _ASM_SCORE_IRQ_H + +#define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000 +#define VECTOR_ADDRESS_OFFSET_MODE4 0 +#define VECTOR_ADDRESS_OFFSET_MODE16 1 + +#define DEBUG_VECTOR_SIZE (0x4) +#define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc) + +#define GENERAL_VECTOR_SIZE (0x10) +#define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200) + +#define NR_IRQS 64 +#define IRQ_VECTOR_SIZE (0x10) +#define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210) +#define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0) + +#define irq_canonicalize(irq) (irq) + +#define P_INT_PNDL 0x95F50000 +#define P_INT_PNDH 0x95F50004 +#define P_INT_PRIORITY_M 0x95F50008 
+#define P_INT_PRIORITY_SG0 0x95F50010 +#define P_INT_PRIORITY_SG1 0x95F50014 +#define P_INT_PRIORITY_SG2 0x95F50018 +#define P_INT_PRIORITY_SG3 0x95F5001C +#define P_INT_MASKL 0x95F50020 +#define P_INT_MASKH 0x95F50024 + +#define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */ + +#endif /* _ASM_SCORE_IRQ_H */ diff --git a/arch/score/include/asm/irq_regs.h b/arch/score/include/asm/irq_regs.h new file mode 100644 index 0000000..905b7b0 --- /dev/null +++ b/arch/score/include/asm/irq_regs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IRQ_REGS_H +#define _ASM_SCORE_IRQ_REGS_H + +#include + +#endif /* _ASM_SCORE_IRQ_REGS_H */ diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h new file mode 100644 index 0000000..92eeb33 --- /dev/null +++ b/arch/score/include/asm/irqflags.h @@ -0,0 +1,111 @@ +#ifndef _ASM_SCORE_IRQFLAGS_H +#define _ASM_SCORE_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#define raw_local_irq_save(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "li r9, 0xfffffffe;" \ + "nop;" \ + "mv %0, r8;" \ + "and r8, r8, r9;" \ + "mtcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + : "=r" (x) \ + : \ + : "r8", "r9" \ + ); \ +} + +#define raw_local_irq_restore(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + "or r8, r8, %0;" \ + "mtcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : "r"(x) \ + : "r8", "r9" \ + ); \ +} + +#define raw_local_irq_enable(void) \ +{ \ + __asm__ __volatile__( \ + "mfcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "ori\tr8,0x1;" \ + "mtcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : \ + : "r8"); \ +} + +#define raw_local_irq_disable(void) \ +{ \ + __asm__ __volatile__( \ + "mfcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "srli\tr8,r8,1;" \ + "slli\tr8,r8,1;" \ + "mtcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : \ + : "r8"); \ +} + +#define raw_local_save_flags(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "mv %0, r8;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + : "=r" (x) \ + : \ + : "r8", "r9" \ + ); \ +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & 1); +} + +#endif + +#endif /* _ASM_SCORE_IRQFLAGS_H */ diff --git a/arch/score/include/asm/kdebug.h b/arch/score/include/asm/kdebug.h new file mode 100644 index 0000000..a666e51 --- /dev/null +++ b/arch/score/include/asm/kdebug.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KDEBUG_H +#define _ASM_SCORE_KDEBUG_H + +#include + +#endif /* _ASM_SCORE_KDEBUG_H */ diff --git a/arch/score/include/asm/kmap_types.h b/arch/score/include/asm/kmap_types.h new file mode 100644 index 0000000..6c46eb5 --- /dev/null +++ b/arch/score/include/asm/kmap_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KMAP_TYPES_H +#define _ASM_SCORE_KMAP_TYPES_H + +#include + +#endif /* _ASM_SCORE_KMAP_TYPES_H */ diff --git a/arch/score/include/asm/linkage.h b/arch/score/include/asm/linkage.h new file mode 100644 index 0000000..2580fbb --- /dev/null +++ b/arch/score/include/asm/linkage.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_LINKAGE_H +#define _ASM_SCORE_LINKAGE_H + +#endif /* _ASM_SCORE_LINKAGE_H */ diff --git a/arch/score/include/asm/local.h b/arch/score/include/asm/local.h new file mode 100644 index 0000000..7e02f13 --- /dev/null +++ b/arch/score/include/asm/local.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_LOCAL_H +#define 
_ASM_SCORE_LOCAL_H + +#include + +#endif /* _ASM_SCORE_LOCAL_H */ diff --git a/arch/score/include/asm/mman.h b/arch/score/include/asm/mman.h new file mode 100644 index 0000000..84d85dd --- /dev/null +++ b/arch/score/include/asm/mman.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMAN_H +#define _ASM_SCORE_MMAN_H + +#include + +#endif /* _ASM_SCORE_MMAN_H */ diff --git a/arch/score/include/asm/mmu.h b/arch/score/include/asm/mmu.h new file mode 100644 index 0000000..676828e --- /dev/null +++ b/arch/score/include/asm/mmu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMU_H +#define _ASM_SCORE_MMU_H + +typedef unsigned long mm_context_t; + +#endif /* _ASM_SCORE_MMU_H */ diff --git a/arch/score/include/asm/mmu_context.h b/arch/score/include/asm/mmu_context.h new file mode 100644 index 0000000..2644577 --- /dev/null +++ b/arch/score/include/asm/mmu_context.h @@ -0,0 +1,113 @@ +#ifndef _ASM_SCORE_MMU_CONTEXT_H +#define _ASM_SCORE_MMU_CONTEXT_H + +#include +#include +#include +#include + +#include +#include +#include + +/* + * For the fast tlb miss handlers, we keep a per cpu array of pointers + * to the current pgd for each processor. Also, the proc. id is stuffed + * into the context register. + */ +extern unsigned long asid_cache; +extern unsigned long pgd_current; + +#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd)) + +#define TLBMISS_HANDLER_SETUP() \ +do { \ + write_c0_context(0); \ + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \ +} while (0) + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +#define ASID_VERSION_MASK 0xfffff000 +#define ASID_FIRST_VERSION 0x1000 + +/* PEVN --------- VPN ---------- --ASID--- -NA- */ +/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */ +/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */ +#define ASID_INC 0x10 +#define ASID_MASK 0xff0 + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{} + +static inline void +get_new_mmu_context(struct mm_struct *mm) +{ + unsigned long asid = asid_cache + ASID_INC; + + if (!(asid & ASID_MASK)) { + local_flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + + mm->context = asid; + asid_cache = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context = 0; + return 0; +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + if ((next->context ^ asid_cache) & ASID_VERSION_MASK) + get_new_mmu_context(next); + + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{} + +static inline void +deactivate_mm(struct task_struct *task, struct mm_struct *mm) +{} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. 
+ */ +static inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + + local_irq_save(flags); + get_new_mmu_context(next); + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +#endif /* _ASM_SCORE_MMU_CONTEXT_H */ diff --git a/arch/score/include/asm/module.h b/arch/score/include/asm/module.h new file mode 100644 index 0000000..f0b5dc0 --- /dev/null +++ b/arch/score/include/asm/module.h @@ -0,0 +1,39 @@ +#ifndef _ASM_SCORE_MODULE_H +#define _ASM_SCORE_MODULE_H + +#include +#include + +struct mod_arch_specific { + /* Data Bus Error exception tables */ + struct list_head dbe_list; + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; +}; + +typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr + +/* Given an address, look for it in the exception tables. */ +#ifdef CONFIG_MODULES +const struct exception_table_entry *search_module_dbetables(unsigned long addr); +#else +static inline const struct exception_table_entry +*search_module_dbetables(unsigned long addr) +{ + return NULL; +} +#endif + +#define MODULE_PROC_FAMILY "SCORE7" +#define MODULE_KERNEL_TYPE "32BIT " +#define MODULE_KERNEL_SMTC "" + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC + +#endif /* _ASM_SCORE_MODULE_H */ diff --git a/arch/score/include/asm/msgbuf.h b/arch/score/include/asm/msgbuf.h new file mode 100644 index 0000000..7506721 --- /dev/null +++ b/arch/score/include/asm/msgbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MSGBUF_H +#define _ASM_SCORE_MSGBUF_H + +#include + +#endif /* _ASM_SCORE_MSGBUF_H */ diff --git a/arch/score/include/asm/mutex.h b/arch/score/include/asm/mutex.h new file mode 100644 index 0000000..10d48fe --- /dev/null +++ b/arch/score/include/asm/mutex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MUTEX_H +#define _ASM_SCORE_MUTEX_H + +#include + +#endif /* _ASM_SCORE_MUTEX_H */ diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h new file mode 100644 index 0000000..67e9820 --- /dev/null +++ b/arch/score/include/asm/page.h @@ -0,0 +1,92 @@ +#ifndef _ASM_SCORE_PAGE_H +#define _ASM_SCORE_PAGE_H + +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT (12) +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) +#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) + +/* align addr on a size boundary - adjust address up/down if needed */ +#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1))) +#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) + +/* align addr on a size boundary - adjust address up if needed */ +#define _ALIGN(addr, size) _ALIGN_UP(addr, size) + +/* + * PAGE_OFFSET -- the first address of the first page of memory. When not + * using MMU this corresponds to the first free page in physical memory (aligned + * on a page boundary). + */ +#define PAGE_OFFSET (0xA0000000UL) + +#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define copy_user_page(vto, vfrom, vaddr, topg) \ + memcpy((vto), (vfrom), PAGE_SIZE) + +/* + * These are used to make use of C type-checking.. 
+ */ + +typedef struct { unsigned long pte; } pte_t; /* page table entry */ +typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; +extern unsigned long max_pfn; + +#define __pa(vaddr) ((unsigned long) (vaddr)) +#define __va(paddr) ((void *) (paddr)) + +#define phys_to_pfn(phys) (PFN_DOWN(phys)) +#define pfn_to_phys(pfn) (PFN_PHYS(pfn)) + +#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) +#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) + +#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) +#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) + +#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) +#define page_to_bus(page) (page_to_phys(page)) +#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) + +#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) + +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) + +#endif /* __ASSEMBLY__ */ + +#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) + +#endif /* __KERNEL__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include +#include + +#endif /* _ASM_SCORE_PAGE_H */ diff --git a/arch/score/include/asm/param.h b/arch/score/include/asm/param.h new file mode 100644 index 0000000..916b869 --- /dev/null +++ b/arch/score/include/asm/param.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PARAM_H +#define _ASM_SCORE_PARAM_H + +#include + +#endif /* _ASM_SCORE_PARAM_H */ diff --git a/arch/score/include/asm/pci.h b/arch/score/include/asm/pci.h new file mode 100644 index 0000000..3f3cfd8 --- /dev/null +++ b/arch/score/include/asm/pci.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_PCI_H +#define _ASM_SCORE_PCI_H + +#endif /* _ASM_SCORE_PCI_H */ diff --git a/arch/score/include/asm/percpu.h b/arch/score/include/asm/percpu.h new file mode 100644 index 0000000..e7bd4e0 --- /dev/null +++ b/arch/score/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PERCPU_H +#define _ASM_SCORE_PERCPU_H + +#include + +#endif /* _ASM_SCORE_PERCPU_H */ diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h new file mode 100644 index 0000000..28dacc1 --- /dev/null +++ b/arch/score/include/asm/pgalloc.h @@ -0,0 +1,83 @@ +#ifndef _ASM_SCORE_PGALLOC_H +#define _ASM_SCORE_PGALLOC_H + +#include + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} + +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long 
address) +{ + pte_t *pte; + + pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, + PTE_ORDER); + + return pte; +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + + pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); + if (pte) { + clear_highpage(pte); + pgtable_page_ctor(pte); + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_pages((unsigned long)pte, PTE_ORDER); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_pages(pte, PTE_ORDER); +} + +#define __pte_free_tlb(tlb, pte) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +#define check_pgt_cache() do {} while (0) + +#endif /* _ASM_SCORE_PGALLOC_H */ diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h new file mode 100644 index 0000000..ca16d35 --- /dev/null +++ b/arch/score/include/asm/pgtable-bits.h @@ -0,0 +1,23 @@ +#ifndef _ASM_SCORE_PGTABLE_BITS_H +#define _ASM_SCORE_PGTABLE_BITS_H + +#define _PAGE_ACCESSED (1<<5) /* implemented in software */ +#define _PAGE_READ (1<<6) /* implemented in software */ +#define _PAGE_WRITE (1<<7) /* implemented in software */ +#define _PAGE_PRESENT (1<<9) /* implemented in software */ +#define _PAGE_MODIFIED (1<<10) /* implemented in software */ +#define _PAGE_FILE (1<<10) + +#define _PAGE_GLOBAL (1<<0) +#define _PAGE_VALID (1<<1) +#define _PAGE_SILENT_READ (1<<1) /* synonym */ +#define _PAGE_DIRTY (1<<2) /* Write bit */ +#define _PAGE_SILENT_WRITE (1<<2) +#define _PAGE_CACHE (1<<3) /* cache */ +#define _CACHE_MASK (1<<3) +#define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. */ + +#define _PAGE_CHG_MASK \ + (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE) + +#endif /* _ASM_SCORE_PGTABLE_BITS_H */ diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h new file mode 100644 index 0000000..0f7177a --- /dev/null +++ b/arch/score/include/asm/pgtable.h @@ -0,0 +1,267 @@ +#ifndef _ASM_SCORE_PGTABLE_H +#define _ASM_SCORE_PGTABLE_H + +#include +#include + +#include +#include +#include + +extern void load_pgd(unsigned long pg_dir); +extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* + * Entries per page directory level: we use two-level, so + * we don't really have any PUD/PMD directory physically. + */ +#define PGD_ORDER 0 +#define PTE_ORDER 0 + +#define PTRS_PER_PGD 1024 +#define PTRS_PER_PTE 1024 + +#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +#define VMALLOC_START (0xc0000000UL) + +#define PKMAP_BASE (0xfd000000UL) + +#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE) + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ + __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ + __FILE__, __LINE__, pgd_val(e)) + +/* + * Empty pgd/pmd entries point to the invalid_pte_table. 
+ */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long) invalid_pte_table; +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != (unsigned long) invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); +} + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) \ + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define __pgd_offset(address) pgd_index(address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* Find an entry in the third-level page table.. */ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_offset_map_nested(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_unmap(pte) ((void)(pte)) +#define pte_unmap_nested(pte) ((void)(pte)) + +/* + * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken, + * split up 30 bits of offset into this range: + */ +#define PTE_FILE_MAX_BITS 30 +#define pte_to_pgoff(_pte) \ + (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9)) +#define pgoff_to_pte(off) \ + ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE}) +#define __pte_to_swp_entry(pte) \ + ((swp_entry_t) { pte_val(pte)}) +#define __swp_entry_to_pte(x) ((pte_t) {(x).val}) + +#define __P000 __pgprot(0) +#define __P001 __pgprot(0) +#define __P010 __pgprot(0) +#define __P011 __pgprot(0) +#define __P100 __pgprot(0) +#define __P101 __pgprot(0) +#define __P110 __pgprot(0) +#define __P111 __pgprot(0) + +#define __S000 __pgprot(0) +#define __S001 __pgprot(0) +#define __S010 __pgprot(0) +#define __S011 __pgprot(0) +#define __S100 __pgprot(0) +#define __S101 __pgprot(0) +#define __S110 __pgprot(0) +#define __S111 __pgprot(0) + +#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +#define pte_clear(mm, addr, xp) \ + do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +#define pgd_present(pgd) (1) /* pages are always present on non MMU */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) + +#define kern_addr_valid(addr) (1) +#define pmd_offset(a, b) ((void *) 0) +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +#define 
pud_offset(pgd, address) ((pud_t *) pgd) + +#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ + +#define pgprot_noncached(x) (x) + +#define __swp_type(x) (0) +#define __swp_offset(x) (0) +#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) + +#define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) + +#define swapper_pg_dir ((pgd_t *) NULL) + +#define pgtable_cache_init() do {} while (0) + +#define arch_enter_lazy_cpu_mode() do {} while (0) + +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_WRITE; +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_MODIFIED; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int pte_file(pte_t pte) +{ + return pte_val(pte) & _PAGE_FILE; +} + +#define pte_special(pte) (0) + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_SILENT_READ; + return pte; +} + +#define set_pmd(pmdptr, pmdval) \ + do { *(pmdptr) = (pmdval); } while (0) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +extern unsigned long pgd_current; +extern void paging_init(void); + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t pte); +extern void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ + __update_tlb(vma, address, pte); + __update_cache(vma, address, pte); +} + +#ifndef __ASSEMBLY__ +#include + +void setup_memory(void); +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_PGTABLE_H */ diff --git a/arch/score/include/asm/poll.h b/arch/score/include/asm/poll.h new file mode 100644 index 0000000..18532db --- /dev/null +++ b/arch/score/include/asm/poll.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POLL_H +#define _ASM_SCORE_POLL_H + +#include + +#endif /* _ASM_SCORE_POLL_H */ diff --git a/arch/score/include/asm/posix_types.h b/arch/score/include/asm/posix_types.h new file mode 100644 index 0000000..b88acf8 --- /dev/null +++ b/arch/score/include/asm/posix_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POSIX_TYPES_H +#define _ASM_SCORE_POSIX_TYPES_H + +#include + +#endif /* _ASM_SCORE_POSIX_TYPES_H */ diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h new file mode 100644 
index 0000000..7e22f21 --- /dev/null +++ b/arch/score/include/asm/processor.h @@ -0,0 +1,106 @@ +#ifndef _ASM_SCORE_PROCESSOR_H +#define _ASM_SCORE_PROCESSOR_H + +#include +#include + +#include + +struct task_struct; + +/* + * System setup and hardware flags.. + */ +extern void (*cpu_wait)(void); + +extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +extern unsigned long thread_saved_pc(struct task_struct *tsk); +extern void start_thread(struct pt_regs *regs, + unsigned long pc, unsigned long sp); +extern unsigned long get_wchan(struct task_struct *p); + +/* + * Return current * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l; }) + +#define cpu_relax() barrier() +#define release_thread(thread) do {} while (0) +#define prepare_to_copy(tsk) do {} while (0) + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. + */ +#define TASK_SIZE 0x7fff8000UL + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE)) + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE +#endif + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + unsigned long reg0, reg2, reg3; + unsigned long reg12, reg13, reg14, reg15, reg16; + unsigned long reg17, reg18, reg19, reg20, reg21; + + unsigned long cp0_psr; + unsigned long cp0_ema; /* Last user fault */ + unsigned long cp0_badvaddr; /* Last user fault */ + unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ + unsigned long error_code; + unsigned long trap_no; + + unsigned long mflags; + unsigned long reg29; + + unsigned long single_step; + unsigned long ss_nextcnt; + + unsigned long insn1_type; + unsigned long addr1; + unsigned long insn1; + + unsigned long insn2_type; + unsigned long addr2; + unsigned long insn2; + + mm_segment_t current_ds; +}; + +#define INIT_THREAD { \ + .reg0 = 0, \ + .reg2 = 0, \ + .reg3 = 0, \ + .reg12 = 0, \ + .reg13 = 0, \ + .reg14 = 0, \ + .reg15 = 0, \ + .reg16 = 0, \ + .reg17 = 0, \ + .reg18 = 0, \ + .reg19 = 0, \ + .reg20 = 0, \ + .reg21 = 0, \ + .cp0_psr = 0, \ + .error_code = 0, \ + .trap_no = 0, \ +} + +#define kstk_tos(tsk) \ + ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32) +#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1) + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) + +#endif /* _ASM_SCORE_PROCESSOR_H */ diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h new file mode 100644 index 0000000..1a4900a --- /dev/null +++ b/arch/score/include/asm/ptrace.h @@ -0,0 +1,87 @@ +#ifndef _ASM_SCORE_PTRACE_H +#define _ASM_SCORE_PTRACE_H + +#define PC 32 +#define CONDITION 33 +#define ECR 34 +#define EMA 35 +#define CEH 36 +#define CEL 37 +#define COUNTER 38 +#define LDCR 39 +#define STCR 40 +#define PSR 41 + +#define SINGLESTEP16_INSN 0x7006 +#define SINGLESTEP32_INSN 0x840C8000 +#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */ +#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */ + +/* Define instruction mask */ +#define INSN32_MASK 0x80008000 + +#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */ +#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */ + +#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */ +#define B32M 
0xFC008000 +#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */ +#define BL32M B32 +#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */ +#define BR32M 0xFFE0807E +#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */ +#define BRL32M BR32M + +#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32) + +#define J16 0x3000 /* 0_011_....... */ +#define J16M 0xF000 +#define B16 0x4000 /* 0_100_....... */ +#define B16M 0xF000 +#define BR16 0x0004 /* 0_000.......0100 */ +#define BR16M 0xF00F +#define B16_SET (J16 | B16 | BR16) + + +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. + */ +struct pt_regs { + unsigned long pad0[6]; + unsigned long orig_r4; + unsigned long orig_r7; + unsigned long regs[32]; + + unsigned long cel; + unsigned long ceh; + + unsigned long sr0; /* cnt */ + unsigned long sr1; /* lcr */ + unsigned long sr2; /* scr */ + + unsigned long cp0_epc; + unsigned long cp0_ema; + unsigned long cp0_psr; + unsigned long cp0_ecr; + unsigned long cp0_condition; + + long is_syscall; +}; + +#ifdef __KERNEL__ + +/* + * Does the process account for user or for system time? + */ +#define user_mode(regs) ((regs->cp0_psr & 8) == 8) + +#define instruction_pointer(regs) (0) +#define profile_pc(regs) instruction_pointer(regs) + +extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *); +extern void clear_single_step(struct task_struct *); +#endif + +#endif /* _ASM_SCORE_PTRACE_H */ diff --git a/arch/score/include/asm/resource.h b/arch/score/include/asm/resource.h new file mode 100644 index 0000000..9ce22bc --- /dev/null +++ b/arch/score/include/asm/resource.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_RESOURCE_H +#define _ASM_SCORE_RESOURCE_H + +#include + +#endif /* _ASM_SCORE_RESOURCE_H */ diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h new file mode 100644 index 0000000..9f533b8 --- /dev/null +++ b/arch/score/include/asm/scatterlist.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SCATTERLIST_H +#define _ASM_SCORE_SCATTERLIST_H + +#include + +#endif /* _ASM_SCORE_SCATTERLIST_H */ diff --git a/arch/score/include/asm/scoreregs.h b/arch/score/include/asm/scoreregs.h new file mode 100644 index 0000000..d0ad292 --- /dev/null +++ b/arch/score/include/asm/scoreregs.h @@ -0,0 +1,51 @@ +#ifndef _ASM_SCORE_SCOREREGS_H +#define _ASM_SCORE_SCOREREGS_H + +#include + +/* TIMER register */ +#define TIME0BASE 0x96080000 +#define P_TIMER0_CTRL (TIME0BASE + 0x00) +#define P_TIMER0_CPP_CTRL (TIME0BASE + 0x04) +#define P_TIMER0_PRELOAD (TIME0BASE + 0x08) +#define P_TIMER0_CPP_REG (TIME0BASE + 0x0C) +#define P_TIMER0_UPCNT (TIME0BASE + 0x10) + +/* Timer Controller Register */ +/* bit 0 Timer enable */ +#define TMR_DISABLE 0x0000 +#define TMR_ENABLE 0x0001 + +/* bit 1 Interrupt enable */ +#define TMR_IE_DISABLE 0x0000 +#define TMR_IE_ENABLE 0x0002 + +/* bit 2 Output enable */ +#define TMR_OE_DISABLE 0x0004 +#define TMR_OE_ENABLE 0x0000 + +/* bit4 Up/Down counting selection */ +#define TMR_UD_DOWN 0x0000 +#define TMR_UD_UP 0x0010 + +/* bit5 Up/Down counting control selection */ +#define TMR_UDS_UD 0x0000 +#define TMR_UDS_EXTUD 0x0020 + +/* bit6 Time output mode */ +#define TMR_OM_TOGGLE 0x0000 +#define TMR_OM_PILSE 0x0040 + +/* bit 8..9 External input active edge selection */ +#define TMR_ES_PE 0x0000 +#define TMR_ES_NE 0x0100 +#define TMR_ES_BOTH 
0x0200 + +/* bit 10..11 Operating mode */ +#define TMR_M_FREE 0x0000 /* free running timer mode */ +#define TMR_M_PERIODIC 0x0400 /* periodic timer mode */ +#define TMR_M_FC 0x0800 /* free running counter mode */ +#define TMR_M_PC 0x0c00 /* periodic counter mode */ + +#define SYSTEM_CLOCK (27*1000000/4) /* 27 MHz */ +#endif /* _ASM_SCORE_SCOREREGS_H */ diff --git a/arch/score/include/asm/sections.h b/arch/score/include/asm/sections.h new file mode 100644 index 0000000..9441d23 --- /dev/null +++ b/arch/score/include/asm/sections.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SECTIONS_H +#define _ASM_SCORE_SECTIONS_H + +#include + +#endif /* _ASM_SCORE_SECTIONS_H */ diff --git a/arch/score/include/asm/segment.h b/arch/score/include/asm/segment.h new file mode 100644 index 0000000..e16cf6a --- /dev/null +++ b/arch/score/include/asm/segment.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_SEGMENT_H +#define _ASM_SCORE_SEGMENT_H + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define KERNEL_DS ((mm_segment_t){0}) +#define USER_DS KERNEL_DS + +# define get_ds() (KERNEL_DS) +# define get_fs() (current_thread_info()->addr_limit) +# define set_fs(x) \ + do { current_thread_info()->addr_limit = (x); } while (0) + +# define segment_eq(a, b) ((a).seg == (b).seg) + +# endif /* __ASSEMBLY__ */ +#endif /* _ASM_SCORE_SEGMENT_H */ diff --git a/arch/score/include/asm/sembuf.h b/arch/score/include/asm/sembuf.h new file mode 100644 index 0000000..dae5e83 --- /dev/null +++ b/arch/score/include/asm/sembuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SEMBUF_H +#define _ASM_SCORE_SEMBUF_H + +#include + +#endif /* _ASM_SCORE_SEMBUF_H */ diff --git a/arch/score/include/asm/setup.h b/arch/score/include/asm/setup.h new file mode 100644 index 0000000..de89eff --- /dev/null +++ b/arch/score/include/asm/setup.h @@ -0,0 +1,40 @@ +#ifndef _ASM_SCORE_SETUP_H +#define _ASM_SCORE_SETUP_H + +#define COMMAND_LINE_SIZE 256 +#define MEM_SIZE 0x2000000 + +#ifdef __KERNEL__ + +extern void pagetable_init(void); +extern void pgd_init(unsigned long page); + +extern void setup_early_printk(void); +extern void cpu_cache_init(void); +extern void tlb_init(void); + +extern void handle_nmi(void); +extern void handle_adelinsn(void); +extern void handle_adedata(void); +extern void handle_ibe(void); +extern void handle_pel(void); +extern void handle_sys(void); +extern void handle_ccu(void); +extern void handle_ri(void); +extern void handle_tr(void); +extern void handle_ades(void); +extern void handle_cee(void); +extern void handle_cpe(void); +extern void handle_dve(void); +extern void handle_dbe(void); +extern void handle_reserved(void); +extern void handle_tlb_refill(void); +extern void handle_tlb_invaild(void); +extern void handle_mod(void); +extern void debug_exception_vector(void); +extern void general_exception_vector(void); +extern void interrupt_exception_vector(void); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SCORE_SETUP_H */ diff --git a/arch/score/include/asm/shmbuf.h b/arch/score/include/asm/shmbuf.h new file mode 100644 index 0000000..c85b242 --- /dev/null +++ b/arch/score/include/asm/shmbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMBUF_H +#define _ASM_SCORE_SHMBUF_H + +#include + +#endif /* _ASM_SCORE_SHMBUF_H */ diff --git a/arch/score/include/asm/shmparam.h b/arch/score/include/asm/shmparam.h new file mode 100644 index 0000000..1d60813 --- /dev/null +++ b/arch/score/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMPARAM_H +#define _ASM_SCORE_SHMPARAM_H + +#include + +#endif /* _ASM_SCORE_SHMPARAM_H */ 
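
[Editorial sketch, not part of the patch: the timer register and mode bits declared in scoreregs.h above, together with IRQ_TIMER from irq.h, suggest how a periodic tick would be programmed. The patch's real timer code lives in arch/score/kernel/time.c, which is not shown in this hunk, so the helper and function names below are hypothetical; only the P_TIMER0_*, TMR_* and SYSTEM_CLOCK constants come from the patch, and the register access is a plain volatile MMIO store rather than any Score-specific accessor.]

/*
 * Hypothetical illustration of the scoreregs.h timer bits; the real
 * implementation is in arch/score/kernel/time.c (not in this hunk).
 */
static inline void timer_reg_write(unsigned long reg, unsigned long val)
{
	*(volatile unsigned long *)reg = val;	/* plain MMIO store */
}

static void spct6600_timer_start(unsigned int hz)
{
	/* Count SYSTEM_CLOCK/hz input clocks per tick... */
	timer_reg_write(P_TIMER0_PRELOAD, SYSTEM_CLOCK / hz);

	/* ...then run periodically with the timer interrupt enabled. */
	timer_reg_write(P_TIMER0_CTRL,
			TMR_ENABLE | TMR_IE_ENABLE | TMR_M_PERIODIC);
}

[The interrupt raised by this configuration would arrive on IRQ_TIMER (7), defined in asm/irq.h earlier in this patch.]
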
diff --git a/arch/score/include/asm/sigcontext.h b/arch/score/include/asm/sigcontext.h new file mode 100644 index 0000000..5ffda39 --- /dev/null +++ b/arch/score/include/asm/sigcontext.h @@ -0,0 +1,22 @@ +#ifndef _ASM_SCORE_SIGCONTEXT_H +#define _ASM_SCORE_SIGCONTEXT_H + +/* + * Keep this struct definition in sync with the sigcontext fragment + * in arch/score/tools/offset.c + */ +struct sigcontext { + unsigned int sc_regmask; + unsigned int sc_psr; + unsigned int sc_condition; + unsigned long sc_pc; + unsigned long sc_regs[32]; + unsigned int sc_ssflags; + unsigned int sc_mdceh; + unsigned int sc_mdcel; + unsigned int sc_ecr; + unsigned long sc_ema; + unsigned long sc_sigset[4]; +}; + +#endif /* _ASM_SCORE_SIGCONTEXT_H */ diff --git a/arch/score/include/asm/siginfo.h b/arch/score/include/asm/siginfo.h new file mode 100644 index 0000000..87ca356 --- /dev/null +++ b/arch/score/include/asm/siginfo.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGINFO_H +#define _ASM_SCORE_SIGINFO_H + +#include + +#endif /* _ASM_SCORE_SIGINFO_H */ diff --git a/arch/score/include/asm/signal.h b/arch/score/include/asm/signal.h new file mode 100644 index 0000000..2605bc0 --- /dev/null +++ b/arch/score/include/asm/signal.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGNAL_H +#define _ASM_SCORE_SIGNAL_H + +#include + +#endif /* _ASM_SCORE_SIGNAL_H */ diff --git a/arch/score/include/asm/socket.h b/arch/score/include/asm/socket.h new file mode 100644 index 0000000..612a70e --- /dev/null +++ b/arch/score/include/asm/socket.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKET_H +#define _ASM_SCORE_SOCKET_H + +#include + +#endif /* _ASM_SCORE_SOCKET_H */ diff --git a/arch/score/include/asm/sockios.h b/arch/score/include/asm/sockios.h new file mode 100644 index 0000000..ba825648 --- /dev/null +++ b/arch/score/include/asm/sockios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKIOS_H +#define _ASM_SCORE_SOCKIOS_H + +#include + +#endif /* _ASM_SCORE_SOCKIOS_H */ diff --git a/arch/score/include/asm/stat.h b/arch/score/include/asm/stat.h new file mode 100644 index 0000000..5037055 --- /dev/null +++ b/arch/score/include/asm/stat.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STAT_H +#define _ASM_SCORE_STAT_H + +#include + +#endif /* _ASM_SCORE_STAT_H */ diff --git a/arch/score/include/asm/statfs.h b/arch/score/include/asm/statfs.h new file mode 100644 index 0000000..36e4100 --- /dev/null +++ b/arch/score/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STATFS_H +#define _ASM_SCORE_STATFS_H + +#include + +#endif /* _ASM_SCORE_STATFS_H */ diff --git a/arch/score/include/asm/string.h b/arch/score/include/asm/string.h new file mode 100644 index 0000000..8a6bf50 --- /dev/null +++ b/arch/score/include/asm/string.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_STRING_H +#define _ASM_SCORE_STRING_H + +extern void *memset(void *__s, int __c, size_t __count); +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#endif /* _ASM_SCORE_STRING_H */ diff --git a/arch/score/include/asm/swab.h b/arch/score/include/asm/swab.h new file mode 100644 index 0000000..fadc3cc --- /dev/null +++ b/arch/score/include/asm/swab.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SWAB_H +#define _ASM_SCORE_SWAB_H + +#include + +#endif /* _ASM_SCORE_SWAB_H */ diff --git a/arch/score/include/asm/syscalls.h b/arch/score/include/asm/syscalls.h new file mode 100644 index 0000000..00c28e0 --- /dev/null +++ b/arch/score/include/asm/syscalls.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SCORE_SYSCALLS_H +#define _ASM_SCORE_SYSCALLS_H + 
+asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); +#define sys_clone sys_clone + +#include + +#endif /* _ASM_SCORE_SYSCALLS_H */ diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h new file mode 100644 index 0000000..589d5c7 --- /dev/null +++ b/arch/score/include/asm/system.h @@ -0,0 +1,90 @@ +#ifndef _ASM_SCORE_SYSTEM_H +#define _ASM_SCORE_SYSTEM_H + +#include +#include + +struct pt_regs; +struct task_struct; + +extern void *resume(void *last, void *next, void *next_ti); + +#define switch_to(prev, next, last) \ +do { \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + +#define finish_arch_switch(prev) do {} while (0) + +typedef void (*vi_handler_t)(void); +extern unsigned long arch_align_stack(unsigned long sp); + +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() + +#define read_barrier_depends() do {} while (0) +#define smp_read_barrier_depends() do {} while (0) + +#define set_mb(var, value) do {var = value; wmb(); } while (0) + +#define __HAVE_ARCH_CMPXCHG 1 + +#include + +#ifndef __ASSEMBLY__ + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + +#define xchg(ptr, v) \ + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ + (unsigned long)(v))) + +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); + return retval; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +extern void __die(const char *, struct pt_regs *, const char *, + const char *, unsigned long) __attribute__((noreturn)); +extern void __die_if_kernel(const char *, struct pt_regs *, const char *, + const char *, unsigned long); + +#define die(msg, regs) \ + __die(msg, regs, __FILE__ ":", __func__, __LINE__) +#define die_if_kernel(msg, regs) \ + __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) + +#endif /* !__ASSEMBLY__ */ +#endif /* _ASM_SCORE_SYSTEM_H */ diff --git a/arch/score/include/asm/termbits.h b/arch/score/include/asm/termbits.h new file mode 100644 index 0000000..9a95c14 --- /dev/null +++ b/arch/score/include/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMBITS_H +#define _ASM_SCORE_TERMBITS_H + +#include + +#endif /* _ASM_SCORE_TERMBITS_H */ diff --git a/arch/score/include/asm/termios.h b/arch/score/include/asm/termios.h new file mode 100644 index 0000000..40984e8 --- /dev/null +++ b/arch/score/include/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMIOS_H +#define _ASM_SCORE_TERMIOS_H + +#include + +#endif /* _ASM_SCORE_TERMIOS_H */ diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h new file mode 100644 index 0000000..0af8ca0 --- /dev/null +++ b/arch/score/include/asm/thread_info.h @@ -0,0 +1,103 @@ +#ifndef _ASM_SCORE_THREAD_INFO_H +#define _ASM_SCORE_THREAD_INFO_H + +#ifdef __KERNEL__ + +#define KU_MASK 0x08 +#define KU_USER 0x08 +#define KU_KERN 0x00 + +#ifndef __ASSEMBLY__ + +#include + 
+/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + + /* 0 => preemptable, < 0 => BUG */ + int preempt_count; + + /* + * thread address space: + * 0-0xBFFFFFFF for user-thead + * 0-0xFFFFFFFF for kernel-thread + */ + mm_segment_t addr_limit; + struct restart_block restart_block; + struct pt_regs *regs; +}; + +/* + * macros/functions for gaining access to the thread information structure + * + * preempt_count needs to be 1 initially, until the scheduler is functional. + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* How to get the thread information struct from C. */ +register struct thread_info *__current_thread_info __asm__("r28"); +#define current_thread_info() __current_thread_info + +/* thread information allocation */ +#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_MASK (THREAD_SIZE - 1UL) +#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR + +#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) +#define free_thread_info(info) kfree(info) + +#endif /* !__ASSEMBLY__ */ + +#define PREEMPT_ACTIVE 0x10000000 + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 + +#define _TIF_SYSCALL_TRACE (1< + +#endif /* _ASM_SCORE_TIMEX_H */ diff --git a/arch/score/include/asm/tlb.h b/arch/score/include/asm/tlb.h new file mode 100644 index 0000000..46882ed --- /dev/null +++ b/arch/score/include/asm/tlb.h @@ -0,0 +1,17 @@ +#ifndef _ASM_SCORE_TLB_H +#define _ASM_SCORE_TLB_H + +/* + * SCORE doesn't need any special per-pte or per-vma handling, except + * we need to flush cache for area to be unmapped. 
+ */ +#define tlb_start_vma(tlb, vma) do {} while (0) +#define tlb_end_vma(tlb, vma) do {} while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0) +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +extern void score7_FTLB_refill_Handler(void); + +#include + +#endif /* _ASM_SCORE_TLB_H */ diff --git a/arch/score/include/asm/tlbflush.h b/arch/score/include/asm/tlbflush.h new file mode 100644 index 0000000..9cce978 --- /dev/null +++ b/arch/score/include/asm/tlbflush.h @@ -0,0 +1,142 @@ +#ifndef _ASM_SCORE_TLBFLUSH_H +#define _ASM_SCORE_TLBFLUSH_H + +#include + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, + unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) \ + local_flush_tlb_range(vma, vmaddr, end) +#define flush_tlb_kernel_range(vmaddr, end) \ + local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#ifndef __ASSEMBLY__ + +static inline unsigned long pevn_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr11\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void pevn_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr11\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void pectx_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr12\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline unsigned long pectx_get(void) +{ + unsigned long val; + __asm__ __volatile__( + "mfcr %0, cr12\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline unsigned long tlblock_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr7\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline void tlblock_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr7\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void tlbpt_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr8\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline long tlbpt_get(void) +{ + long val; + + __asm__ __volatile__( + "mfcr %0, cr8\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void peaddr_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr9\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +/* TLB operations. 
*/ +static inline void tlb_probe(void) +{ + __asm__ __volatile__("stlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__("mftlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop"); +} + +#endif /* Not __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_TLBFLUSH_H */ diff --git a/arch/score/include/asm/topology.h b/arch/score/include/asm/topology.h new file mode 100644 index 0000000..425fba3 --- /dev/null +++ b/arch/score/include/asm/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TOPOLOGY_H +#define _ASM_SCORE_TOPOLOGY_H + +#include + +#endif /* _ASM_SCORE_TOPOLOGY_H */ diff --git a/arch/score/include/asm/types.h b/arch/score/include/asm/types.h new file mode 100644 index 0000000..2140032 --- /dev/null +++ b/arch/score/include/asm/types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TYPES_H +#define _ASM_SCORE_TYPES_H + +#include + +#endif /* _ASM_SCORE_TYPES_H */ diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h new file mode 100644 index 0000000..43ce28a1d --- /dev/null +++ b/arch/score/include/asm/uaccess.h @@ -0,0 +1,27 @@ +#ifndef _ASM_SCORE_UACCESS_H +#define _ASM_SCORE_UACCESS_H +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +struct pt_regs; +extern int fixup_exception(struct pt_regs *regs); + +#ifndef __ASSEMBLY__ + +#define __range_ok(addr, size) \ + ((((unsigned long)(addr) >= 0x80000000) \ + || ((unsigned long)(size) > 0x80000000) \ + || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000))) + +#define __access_ok(addr, size) \ + (__range_ok((addr), (size)) == 0) + +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_UACCESS_H */ diff --git a/arch/score/include/asm/unaligned.h b/arch/score/include/asm/unaligned.h new file mode 100644 index 0000000..2fc06de --- /dev/null +++ b/arch/score/include/asm/unaligned.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_UNALIGNED_H +#define _ASM_SCORE_UNALIGNED_H + +#include + +#endif /* _ASM_SCORE_UNALIGNED_H */ diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h new file mode 100644 index 0000000..9aa3a15 --- /dev/null +++ b/arch/score/include/asm/unistd.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_UNISTD_H +#define _ASM_SCORE_UNISTD_H + +#define __ARCH_HAVE_MMU + +#include + +#endif /* _ASM_SCORE_UNISTD_H */ diff --git a/arch/score/include/asm/user.h b/arch/score/include/asm/user.h new file mode 100644 index 0000000..3cf7572 --- /dev/null +++ b/arch/score/include/asm/user.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_USER_H +#define _ASM_SCORE_USER_H + +#endif /* _ASM_SCORE_USER_H */ diff --git a/arch/score/kernel/Makefile b/arch/score/kernel/Makefile new file mode 100644 index 0000000..1e5de89 --- /dev/null +++ b/arch/score/kernel/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Linux/SCORE kernel. 
+# + +extra-y := head.o vmlinux.lds + +obj-y += entry.o init_task.o irq.o process.o ptrace.o \ + setup.o signal.o sys_score.o time.o traps.o + +obj-$(CONFIG_MODULES) += module.o diff --git a/arch/score/kernel/asm-offsets.c b/arch/score/kernel/asm-offsets.c new file mode 100644 index 0000000..57788f4 --- /dev/null +++ b/arch/score/kernel/asm-offsets.c @@ -0,0 +1,216 @@ +/* + * arch/score/kernel/asm-offsets.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include + +void output_ptreg_defines(void) +{ + COMMENT("SCORE pt_regs offsets."); + OFFSET(PT_R0, pt_regs, regs[0]); + OFFSET(PT_R1, pt_regs, regs[1]); + OFFSET(PT_R2, pt_regs, regs[2]); + OFFSET(PT_R3, pt_regs, regs[3]); + OFFSET(PT_R4, pt_regs, regs[4]); + OFFSET(PT_R5, pt_regs, regs[5]); + OFFSET(PT_R6, pt_regs, regs[6]); + OFFSET(PT_R7, pt_regs, regs[7]); + OFFSET(PT_R8, pt_regs, regs[8]); + OFFSET(PT_R9, pt_regs, regs[9]); + OFFSET(PT_R10, pt_regs, regs[10]); + OFFSET(PT_R11, pt_regs, regs[11]); + OFFSET(PT_R12, pt_regs, regs[12]); + OFFSET(PT_R13, pt_regs, regs[13]); + OFFSET(PT_R14, pt_regs, regs[14]); + OFFSET(PT_R15, pt_regs, regs[15]); + OFFSET(PT_R16, pt_regs, regs[16]); + OFFSET(PT_R17, pt_regs, regs[17]); + OFFSET(PT_R18, pt_regs, regs[18]); + OFFSET(PT_R19, pt_regs, regs[19]); + OFFSET(PT_R20, pt_regs, regs[20]); + OFFSET(PT_R21, pt_regs, regs[21]); + OFFSET(PT_R22, pt_regs, regs[22]); + OFFSET(PT_R23, pt_regs, regs[23]); + OFFSET(PT_R24, pt_regs, regs[24]); + OFFSET(PT_R25, pt_regs, regs[25]); + OFFSET(PT_R26, pt_regs, regs[26]); + OFFSET(PT_R27, pt_regs, regs[27]); + OFFSET(PT_R28, pt_regs, regs[28]); + OFFSET(PT_R29, pt_regs, regs[29]); + OFFSET(PT_R30, pt_regs, regs[30]); + OFFSET(PT_R31, pt_regs, regs[31]); + + OFFSET(PT_ORIG_R4, pt_regs, orig_r4); + OFFSET(PT_ORIG_R7, pt_regs, orig_r7); + OFFSET(PT_CEL, pt_regs, cel); + OFFSET(PT_CEH, pt_regs, ceh); + OFFSET(PT_SR0, pt_regs, sr0); + OFFSET(PT_SR1, pt_regs, sr1); + OFFSET(PT_SR2, pt_regs, sr2); + OFFSET(PT_EPC, pt_regs, cp0_epc); + OFFSET(PT_EMA, pt_regs, cp0_ema); + OFFSET(PT_PSR, pt_regs, cp0_psr); + OFFSET(PT_ECR, pt_regs, cp0_ecr); + OFFSET(PT_CONDITION, pt_regs, cp0_condition); + OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall); + + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + BLANK(); +} + +void output_task_defines(void) +{ + COMMENT("SCORE task_struct offsets."); + OFFSET(TASK_STATE, task_struct, state); + OFFSET(TASK_THREAD_INFO, task_struct, stack); + OFFSET(TASK_FLAGS, task_struct, flags); + OFFSET(TASK_MM, task_struct, mm); + OFFSET(TASK_PID, task_struct, pid); + DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); + BLANK(); +} + +void output_thread_info_defines(void) +{ + COMMENT("SCORE thread_info 
offsets."); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_TP_VALUE, thread_info, tp_value); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); + OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); + OFFSET(TI_REGS, thread_info, regs); + DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); + DEFINE(KERNEL_STACK_MASK, THREAD_MASK); + BLANK(); +} + +void output_thread_defines(void) +{ + COMMENT("SCORE specific thread_struct offsets."); + OFFSET(THREAD_REG0, task_struct, thread.reg0); + OFFSET(THREAD_REG2, task_struct, thread.reg2); + OFFSET(THREAD_REG3, task_struct, thread.reg3); + OFFSET(THREAD_REG12, task_struct, thread.reg12); + OFFSET(THREAD_REG13, task_struct, thread.reg13); + OFFSET(THREAD_REG14, task_struct, thread.reg14); + OFFSET(THREAD_REG15, task_struct, thread.reg15); + OFFSET(THREAD_REG16, task_struct, thread.reg16); + OFFSET(THREAD_REG17, task_struct, thread.reg17); + OFFSET(THREAD_REG18, task_struct, thread.reg18); + OFFSET(THREAD_REG19, task_struct, thread.reg19); + OFFSET(THREAD_REG20, task_struct, thread.reg20); + OFFSET(THREAD_REG21, task_struct, thread.reg21); + OFFSET(THREAD_REG29, task_struct, thread.reg29); + + OFFSET(THREAD_PSR, task_struct, thread.cp0_psr); + OFFSET(THREAD_EMA, task_struct, thread.cp0_ema); + OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr); + OFFSET(THREAD_ECODE, task_struct, thread.error_code); + OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); + BLANK(); +} + +void output_mm_defines(void) +{ + COMMENT("Size of struct page"); + DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); + BLANK(); + COMMENT("Linux mm_struct offsets."); + OFFSET(MM_USERS, mm_struct, mm_users); + OFFSET(MM_PGD, mm_struct, pgd); + OFFSET(MM_CONTEXT, mm_struct, context); + BLANK(); + DEFINE(_PAGE_SIZE, PAGE_SIZE); + DEFINE(_PAGE_SHIFT, PAGE_SHIFT); + BLANK(); + DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); + DEFINE(_PTE_T_SIZE, sizeof(pte_t)); + BLANK(); + DEFINE(_PGD_ORDER, PGD_ORDER); + DEFINE(_PTE_ORDER, PTE_ORDER); + BLANK(); + DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); + BLANK(); + DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); + BLANK(); +} + +void output_sc_defines(void) +{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + OFFSET(SC_MDCEH, sigcontext, sc_mdceh); + OFFSET(SC_MDCEL, sigcontext, sc_mdcel); + OFFSET(SC_PC, sigcontext, sc_pc); + OFFSET(SC_PSR, sigcontext, sc_psr); + OFFSET(SC_ECR, sigcontext, sc_ecr); + OFFSET(SC_EMA, sigcontext, sc_ema); + BLANK(); +} + +void output_signal_defined(void) +{ + COMMENT("Linux signal numbers."); + DEFINE(_SIGHUP, SIGHUP); + DEFINE(_SIGINT, SIGINT); + DEFINE(_SIGQUIT, SIGQUIT); + DEFINE(_SIGILL, SIGILL); + DEFINE(_SIGTRAP, SIGTRAP); + DEFINE(_SIGIOT, SIGIOT); + DEFINE(_SIGABRT, SIGABRT); + DEFINE(_SIGFPE, SIGFPE); + DEFINE(_SIGKILL, SIGKILL); + DEFINE(_SIGBUS, SIGBUS); + DEFINE(_SIGSEGV, SIGSEGV); + DEFINE(_SIGSYS, SIGSYS); + DEFINE(_SIGPIPE, SIGPIPE); + DEFINE(_SIGALRM, SIGALRM); + DEFINE(_SIGTERM, SIGTERM); + DEFINE(_SIGUSR1, SIGUSR1); + DEFINE(_SIGUSR2, SIGUSR2); + DEFINE(_SIGCHLD, SIGCHLD); + DEFINE(_SIGPWR, SIGPWR); + DEFINE(_SIGWINCH, SIGWINCH); + DEFINE(_SIGURG, SIGURG); + DEFINE(_SIGIO, SIGIO); + DEFINE(_SIGSTOP, SIGSTOP); + DEFINE(_SIGTSTP, SIGTSTP); + DEFINE(_SIGCONT, SIGCONT); + DEFINE(_SIGTTIN, SIGTTIN); + DEFINE(_SIGTTOU, SIGTTOU); + DEFINE(_SIGVTALRM, SIGVTALRM); + DEFINE(_SIGPROF, SIGPROF); + 
DEFINE(_SIGXCPU, SIGXCPU); + DEFINE(_SIGXFSZ, SIGXFSZ); + BLANK(); +} diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S new file mode 100644 index 0000000..6c6b7ea --- /dev/null +++ b/arch/score/kernel/entry.S @@ -0,0 +1,542 @@ +/* + * arch/score/kernel/entry.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include +#include +#include + +/* + * disable interrupts. + */ +.macro disable_irq + mfcr r8, cr0 + srli r8, r8, 1 + slli r8, r8, 1 + mtcr r8, cr0 + nop + nop + nop + nop + nop +.endm + +/* + * enable interrupts. + */ +.macro enable_irq + mfcr r8, cr0 + ori r8, 1 + mtcr r8, cr0 + nop + nop + nop + nop + nop +.endm + +__INIT +ENTRY(debug_exception_vector) + nop! + nop! + nop! + nop! + nop! + nop! + nop! + nop! + +ENTRY(general_exception_vector) # should move to addr 0x200 + j general_exception + nop! + nop! + nop! + nop! + nop! + nop! + +ENTRY(interrupt_exception_vector) # should move to addr 0x210 + j interrupt_exception + nop! + nop! + nop! + nop! + nop! + nop! + + .section ".text", "ax" + .align 2; +general_exception: + mfcr r31, cr2 + nop + la r30, exception_handlers + andi r31, 0x1f # get ecr.exc_code + slli r31, r31, 2 + add r30, r30, r31 + lw r30, [r30] + br r30 + +interrupt_exception: + SAVE_ALL + mfcr r4, cr2 + nop + lw r16, [r28, TI_REGS] + sw r0, [r28, TI_REGS] + la r3, ret_from_irq + srli r4, r4, 18 # get ecr.ip[7:2], interrupt No. 
+ mv r5, r0 + j do_IRQ + +ENTRY(handle_nmi) # NMI #1 + SAVE_ALL + mv r4, r0 + la r8, nmi_exception_handler + brl r8 + j restore_all + +ENTRY(handle_adelinsn) # AdEL-instruction #2 + SAVE_ALL + mfcr r8, cr6 + nop + nop + sw r8, [r0, PT_EMA] + mv r4, r0 + la r8, do_adelinsn + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ibe) # BusEL-instruction #5 + SAVE_ALL + mv r4, r0 + la r8, do_be + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_pel) # P-EL #6 + SAVE_ALL + mv r4, r0 + la r8, do_pel + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ccu) # CCU #8 + SAVE_ALL + mv r4, r0 + la r8, do_ccu + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ri) # RI #9 + SAVE_ALL + mv r4, r0 + la r8, do_ri + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_tr) # Trap #10 + SAVE_ALL + mv r4, r0 + la r8, do_tr + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_adedata) # AdES-instruction #12 + SAVE_ALL + mfcr r8, cr6 + nop + nop + sw r8, [r0, PT_EMA] + mv r4, r0 + la r8, do_adedata + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_cee) # CeE #16 + SAVE_ALL + mv r4, r0 + la r8, do_cee + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_cpe) # CpE #17 + SAVE_ALL + mv r4, r0 + la r8, do_cpe + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_dbe) # BusEL-data #18 + SAVE_ALL + mv r4, r0 + la r8, do_be + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_reserved) # others + SAVE_ALL + mv r4, r0 + la r8, do_reserved + brl r8 + mv r4, r0 + j ret_from_exception + nop + +#ifndef CONFIG_PREEMPT +#define resume_kernel restore_all +#else +#define __ret_from_irq ret_from_exception +#endif + + .align 2 +#ifndef CONFIG_PREEMPT +ENTRY(ret_from_exception) + disable_irq # preempt stop + nop + j __ret_from_irq + nop +#endif + +ENTRY(ret_from_irq) + sw r16, [r28, TI_REGS] + +ENTRY(__ret_from_irq) + lw r8, [r0, PT_PSR] # returning to kernel mode? + andri.c r8, r8, KU_USER + beq resume_kernel + +resume_userspace: + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r8, r6 # ignoring syscall_trace + bne work_pending + nop + j restore_all + nop + +#ifdef CONFIG_PREEMPT +resume_kernel: + disable_irq + lw r8, [r28, TI_PRE_COUNT] + cmpz.c r8 + bne r8, restore_all +need_resched: + lw r8, [r28, TI_FLAGS] + andri.c r9, r8, _TIF_NEED_RESCHED + beq restore_all + lw r8, [r28, PT_PSR] # Interrupts off? + andri.c r8, r8, 1 + beq restore_all + bl preempt_schedule_irq + nop + j need_resched + nop +#endif + +ENTRY(ret_from_fork) + bl schedule_tail # r4=struct task_struct *prev + +ENTRY(syscall_exit) + nop + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 + bne syscall_exit_work + +ENTRY(restore_all) # restore full frame + RESTORE_ALL_AND_RET + +work_pending: + andri.c r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS + beq work_notifysig +work_resched: + bl schedule + nop + disable_irq + lw r6, [r28, TI_FLAGS] + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 # is there any work to be done + # other than syscall tracing? + beq restore_all + andri.c r8, r6, _TIF_NEED_RESCHED + bne work_resched + +work_notifysig: + mv r4, r0 + li r5, 0 + bl do_notify_resume # r6 already loaded + nop + j resume_userspace + nop + +ENTRY(syscall_exit_work) + li r8, _TIF_SYSCALL_TRACE + and.c r8, r8, r6 # r6 is preloaded with TI_FLAGS + beq work_pending # trace bit set? 
+ nop + enable_irq + mv r4, r0 + li r5, 1 + bl do_syscall_trace + nop + b resume_userspace + nop + +.macro save_context reg + sw r12, [\reg, THREAD_REG12]; + sw r13, [\reg, THREAD_REG13]; + sw r14, [\reg, THREAD_REG14]; + sw r15, [\reg, THREAD_REG15]; + sw r16, [\reg, THREAD_REG16]; + sw r17, [\reg, THREAD_REG17]; + sw r18, [\reg, THREAD_REG18]; + sw r19, [\reg, THREAD_REG19]; + sw r20, [\reg, THREAD_REG20]; + sw r21, [\reg, THREAD_REG21]; + sw r29, [\reg, THREAD_REG29]; + sw r2, [\reg, THREAD_REG2]; + sw r0, [\reg, THREAD_REG0] +.endm + +.macro restore_context reg + lw r12, [\reg, THREAD_REG12]; + lw r13, [\reg, THREAD_REG13]; + lw r14, [\reg, THREAD_REG14]; + lw r15, [\reg, THREAD_REG15]; + lw r16, [\reg, THREAD_REG16]; + lw r17, [\reg, THREAD_REG17]; + lw r18, [\reg, THREAD_REG18]; + lw r19, [\reg, THREAD_REG19]; + lw r20, [\reg, THREAD_REG20]; + lw r21, [\reg, THREAD_REG21]; + lw r29, [\reg, THREAD_REG29]; + lw r0, [\reg, THREAD_REG0]; + lw r2, [\reg, THREAD_REG2]; + lw r3, [\reg, THREAD_REG3] +.endm + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) + */ +ENTRY(resume) + mfcr r9, cr0 + nop + nop + sw r9, [r4, THREAD_PSR] + save_context r4 + sw r3, [r4, THREAD_REG3] + + mv r28, r6 + restore_context r5 + mv r8, r6 + addi r8, KERNEL_STACK_SIZE + subi r8, 32 + la r9, kernelsp; + sw r8, [r9]; + + mfcr r9, cr0 + ldis r7, 0x00ff + nop + and r9, r9, r7 + lw r6, [r5, THREAD_PSR] + not r7, r7 + and r6, r6, r7 + or r6, r6, r9 + mtcr r6, cr0 + nop; nop; nop; nop; nop + br r3 + +ENTRY(handle_sys) + SAVE_ALL + enable_irq + + sw r4, [r0, PT_ORIG_R4] #for restart syscall + sw r7, [r0, PT_ORIG_R7] #for restart syscall + sw r27, [r0, PT_IS_SYSCALL] # it from syscall + + lw r9, [r0, PT_EPC] # skip syscall on return + addi r9, 4 + sw r9, [r0, PT_EPC] + + cmpi.c r27, __NR_syscalls # check syscall number + bgtu illegal_syscall + + slli r8, r27, 3 # get syscall routine + la r11, sys_call_table + add r11, r11, r8 + lw r10, [r11] # get syscall entry + lw r11, [r11, 4] # get number of args + + cmpz.c r10 + beq illegal_syscall + + cmpi.c r11, 4 # more than 4 arguments? + bgtu stackargs + +stack_done: + lw r8, [r28, TI_FLAGS] + li r9, _TIF_SYSCALL_TRACE + and.c r8, r8, r9 + bne syscall_trace_entry + + brl r10 # Do The Real system call + + cmpi.c r4, 0 + blt 1f + ldi r8, 0 + sw r8, [r0, PT_R7] + b 2f +1: + cmpi.c r4, -EMAXERRNO-1 # -EMAXERRNO - 1=-1134 + ble 2f + ldi r8, 0x1; + sw r8, [r0, PT_R7] + neg r4, r4 +2: + sw r4, [r0, PT_R4] # save result + +syscall_return: + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 + bne syscall_return_work + j restore_all + +syscall_return_work: + j syscall_exit_work + +syscall_trace_entry: + mv r16, r10 + mv r4, r0 + li r5, 0 + bl do_syscall_trace + + mv r8, r16 + lw r4, [r0, PT_R4] # Restore argument registers + lw r5, [r0, PT_R5] + lw r6, [r0, PT_R6] + lw r7, [r0, PT_R7] + brl r8 + + li r8, -EMAXERRNO - 1 # error? 
+ sw r8, [r0, PT_R7] # set error flag + + neg r4, r4 # error + sw r4, [r0, PT_R0] # set flag for syscall + # restarting +1: sw r4, [r0, PT_R2] # result + j syscall_exit + +stackargs: + lw r8, [r0, PT_R0] + andri.c r9, r8, 3 # test whether user sp is align a word + bne bad_stack + subi r11, 5 + slli r9, r11, 2 + add.c r9, r9, r8 + + bmi bad_stack + la r9, 3f # calculate branch address + slli r11, r11, 3 + sub r9, r9, r11 + br r9 + +2: lw r9, [r8, 20] # argument 6 from usp + sw r9, [r0, 20] + +3: lw r9, [r8, 16] # argument 5 from usp + sw r9, [r0, 16] + j stack_done + + .section __ex_table,"a" + .word 2b, bad_stack + .word 3b, bad_stack + .previous + + /* + * The stackpointer for a call with more than 4 arguments is bad. + * We probably should handle this case a bit more drastic. + */ +bad_stack: + neg r27, r27 # error + sw r27, [r0, PT_ORIG_R4] + sw r27, [r0, PT_R4] + ldi r8, 1 # set error flag + sw r8, [r0, PT_R7] + j syscall_return + +illegal_syscall: + ldi r4, -ENOSYS # error + sw r4, [r0, PT_ORIG_R4] + sw r4, [r0, PT_R4] + ldi r9, 1 # set error flag + sw r9, [r0, PT_R7] + j syscall_return + +ENTRY(sys_execve) + mv r4, r0 + la r8, score_execve + br r8 + +ENTRY(sys_clone) + mv r4, r0 + la r8, score_clone + br r8 + +ENTRY(sys_rt_sigreturn) + mv r4, r0 + la r8, score_rt_sigreturn + br r8 + +ENTRY(sys_sigaltstack) + mv r4, r0 + la r8, score_sigaltstack + br r8 diff --git a/arch/score/kernel/head.S b/arch/score/kernel/head.S new file mode 100644 index 0000000..22a7e3c --- /dev/null +++ b/arch/score/kernel/head.S @@ -0,0 +1,70 @@ +/* + * arch/score/kernel/head.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include + +#include + + .extern start_kernel + .global init_thread_union + .global kernelsp + +__INIT +ENTRY(_stext) + la r30, __bss_start /* initialize BSS segment. */ + la r31, _end + xor r8, r8, r8 + +1: cmp.c r31, r30 + beq 2f + + sw r8, [r30] /* clean memory. */ + addi r30, 4 + b 1b + +2: la r28, init_thread_union /* set kernel stack. */ + mv r0, r28 + addi r0, KERNEL_STACK_SIZE - 32 + la r30, kernelsp + sw r0, [r30] + subi r0, 4*4 + xor r30, r30, r30 + ori r30, 0x02 /* enable MMU. */ + mtcr r30, cr4 + nop + nop + nop + nop + nop + nop + nop + + /* there is no parameter */ + xor r4, r4, r4 + xor r5, r5, r5 + xor r6, r6, r6 + xor r7, r7, r7 + la r30, start_kernel /* jump to init_arch */ + br r30 diff --git a/arch/score/kernel/init_task.c b/arch/score/kernel/init_task.c new file mode 100644 index 0000000..9eecde0 --- /dev/null +++ b/arch/score/kernel/init_task.c @@ -0,0 +1,49 @@ +/* + * arch/score/kernel/init_task.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); +EXPORT_SYMBOL(init_mm); + +/* + * Initial thread structure. + * + * We need to make sure that this is THREAD_SIZE aligned due to the + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry.. + */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); diff --git a/arch/score/kernel/irq.c b/arch/score/kernel/irq.c new file mode 100644 index 0000000..55474e8 --- /dev/null +++ b/arch/score/kernel/irq.c @@ -0,0 +1,135 @@ +/* + * arch/score/kernel/irq.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include + +/* + * handles all normal device IRQs + */ +asmlinkage void do_IRQ(int irq) +{ + irq_enter(); + generic_handle_irq(irq); + irq_exit(); +} + +static void score_mask(unsigned int irq_nr) +{ + unsigned int irq_source = 63 - irq_nr; + + if (irq_source < 32) + __raw_writel((__raw_readl((void *)P_INT_MASKL) | \ + (1 << irq_source)), (void *)P_INT_MASKL); + else + __raw_writel((__raw_readl((void *)P_INT_MASKH) | \ + (1 << (irq_source - 32))), (void *)P_INT_MASKH); +} + +static void score_unmask(unsigned int irq_nr) +{ + unsigned int irq_source = 63 - irq_nr; + + if (irq_source < 32) + __raw_writel((__raw_readl((void *)P_INT_MASKL) & \ + ~(1 << irq_source)), (void *)P_INT_MASKL); + else + __raw_writel((__raw_readl((void *)P_INT_MASKH) & \ + ~(1 << (irq_source - 32))), (void *)P_INT_MASKH); +} + +struct irq_chip score_irq_chip = { + .name = "Score7-level", + .mask = score_mask, + .mask_ack = score_mask, + .unmask = score_unmask, +}; + +/* + * initialise the interrupt system + */ +void __init init_IRQ(void) +{ + int index; + unsigned long target_addr; + + for (index = 0; index < NR_IRQS; ++index) + set_irq_chip_and_handler(index, &score_irq_chip, + handle_level_irq); + + for (target_addr = IRQ_VECTOR_BASE_ADDR; + target_addr <= IRQ_VECTOR_END_ADDR; + target_addr += IRQ_VECTOR_SIZE) + memcpy((void *)target_addr, \ + interrupt_exception_vector, IRQ_VECTOR_SIZE); + + __raw_writel(0xffffffff, (void *)P_INT_MASKL); + __raw_writel(0xffffffff, (void *)P_INT_MASKH); + + __asm__ __volatile__( + "mtcr %0, cr3\n\t" + : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \ + VECTOR_ADDRESS_OFFSET_MODE16)); +} + +/* + * Generic, controller-independent functions: + */ +int show_interrupts(struct seq_file *p, void *v) +{ + int i = *(loff_t *)v, cpu; + struct irqaction *action; + unsigned long flags; + + if (i == 0) { + seq_puts(p, " "); + for_each_online_cpu(cpu) + seq_printf(p, "CPU%d ", cpu); + seq_putc(p, '\n'); + } + + if (i < NR_IRQS) { + spin_lock_irqsave(&irq_desc[i].lock, flags); + action = irq_desc[i].action; + if (!action) + goto unlock; + + seq_printf(p, "%3d: ", i); + seq_printf(p, "%10u ", kstat_irqs(i)); + seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-"); + seq_printf(p, " %s", action->name); + for (action = action->next; action; action = action->next) + seq_printf(p, ", %s", action->name); + + seq_putc(p, '\n'); +unlock: + spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } + + return 0; +} diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c new file mode 100644 index 0000000..4ffce7f --- /dev/null +++ b/arch/score/kernel/module.c @@ -0,0 +1,164 @@ +/* + * arch/score/kernel/module.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +void *module_alloc(unsigned long size) +{ + return size ? vmalloc(size) : NULL; +} + +/* Free memory returned from module_alloc */ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); +} + +int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + return 0; +} + +int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relindex, + struct module *me) +{ + Elf32_Shdr *symsec = sechdrs + symindex; + Elf32_Shdr *relsec = sechdrs + relindex; + Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; + Elf32_Rel *rel = (void *)relsec->sh_addr; + unsigned int i; + + for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { + unsigned long loc; + Elf32_Sym *sym; + s32 offset; + + offset = ELF32_R_SYM(rel->r_info); + if ((offset < 0) || + (offset > (symsec->sh_size / sizeof(Elf32_Sym)))) { + printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", + me->name, relindex, i); + return -ENOEXEC; + } + + sym = ((Elf32_Sym *)symsec->sh_addr) + offset; + + if ((rel->r_offset < 0) || + (rel->r_offset > dstsec->sh_size - sizeof(u32))) { + printk(KERN_ERR "%s: out of bounds relocation, " + "section %d reloc %d offset %d size %d\n", + me->name, relindex, i, rel->r_offset, + dstsec->sh_size); + return -ENOEXEC; + } + + loc = dstsec->sh_addr + rel->r_offset; + switch (ELF32_R_TYPE(rel->r_info)) { + case R_SCORE_NONE: + break; + case R_SCORE_ABS32: + *(unsigned long *)loc += sym->st_value; + break; + case R_SCORE_HI16: + break; + case R_SCORE_LO16: { + unsigned long hi16_offset, offset; + unsigned long uvalue; + unsigned long temp, temp_hi; + temp_hi = *((unsigned long *)loc - 1); + temp = *(unsigned long *)loc; + + hi16_offset = (((((temp_hi) >> 16) & 0x3) << 15) | + ((temp_hi) & 0x7fff)) >> 1; + offset = ((temp >> 16 & 0x03) << 15) | + ((temp & 0x7fff) >> 1); + offset = (hi16_offset << 16) | (offset & 0xffff); + uvalue = sym->st_value + offset; + hi16_offset = (uvalue >> 16) << 1; + + temp_hi = ((temp_hi) & (~(0x37fff))) | + (hi16_offset & 0x7fff) | + ((hi16_offset << 1) & 0x30000); + *((unsigned long *)loc - 1) = temp_hi; + + offset = (uvalue & 0xffff) << 1; + temp = (temp & (~(0x37fff))) | (offset & 0x7fff) | + ((offset << 1) & 0x30000); + *(unsigned long *)loc = temp; + break; + } + case R_SCORE_24: { + unsigned long hi16_offset, offset; + unsigned long uvalue; + unsigned long temp; + + temp = *(unsigned long *)loc; + offset = (temp & 0x03FF7FFE); + hi16_offset = (offset & 0xFFFF0000); + offset = (hi16_offset | ((offset & 0xFFFF) << 1)) >> 2; + + uvalue = (sym->st_value + offset) >> 1; + uvalue = uvalue & 0x00ffffff; + + temp = (temp & 0xfc008001) | + ((uvalue << 2) & 0x3ff0000) | + ((uvalue & 0x3fff) << 1); + *(unsigned long *)loc = temp; + break; + } + default: + printk(KERN_ERR "%s: unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel->r_info)); + return -ENOEXEC; + } + } + + return 0; +} + +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + return 0; +} + +/* Given an address, look for it in the module exception tables. 
*/ +const struct exception_table_entry *search_module_dbetables(unsigned long addr) +{ + return 0; +} + +/* Put in dbe list if necessary. */ +int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *me) +{ + return 0; +} + +void module_arch_cleanup(struct module *mod) {} diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c new file mode 100644 index 0000000..aaa3085 --- /dev/null +++ b/arch/score/kernel/process.c @@ -0,0 +1,165 @@ +/* + * arch/score/kernel/process.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +/* If or when software machine-restart is implemented, add code here. */ +void machine_restart(char *command) {} + +/* If or when software machine-halt is implemented, add code here. */ +void machine_halt(void) {} + +/* If or when software machine-power-off is implemented, add code here. */ +void machine_power_off(void) {} + +/* + * The idle thread. There's no useful work to be + * done, so just try to conserve power and have a + * low exit latency (ie sit in a loop waiting for + * somebody to say that they'd like to reschedule) + */ +void __noreturn cpu_idle(void) +{ + /* endless idle loop with no priority at all */ + while (1) { + while (!need_resched()) + barrier(); + + preempt_enable_no_resched(); + schedule(); + preempt_disable(); + } +} + +asmlinkage void ret_from_fork(void); + +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + unsigned long status; + + /* New thread loses kernel privileges. */ + status = regs->cp0_psr & ~(KU_MASK); + status |= KU_USER; + regs->cp0_psr = status; + regs->cp0_epc = pc; + regs->regs[0] = sp; +} + +void exit_thread(void) {} + +/* + * When a process does an "exec", machine state like FPU and debug + * registers need to be reset. This is a hook function for that. + * Currently we don't have any such state to reset, so this is empty. 
+ */ +void flush_thread(void) {} + +/* + * set up the kernel stack and exception frames for a new process + */ +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long unused, + struct task_struct *p, struct pt_regs *regs) +{ + struct thread_info *ti = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + + p->set_child_tid = NULL; + p->clear_child_tid = NULL; + + *childregs = *regs; + childregs->regs[7] = 0; /* Clear error flag */ + childregs->regs[4] = 0; /* Child gets zero as return value */ + regs->regs[4] = p->pid; + + if (childregs->cp0_psr & 0x8) { /* test kernel fork or user fork */ + childregs->regs[0] = usp; /* user fork */ + } else { + childregs->regs[28] = (unsigned long) ti; /* kernel fork */ + childregs->regs[0] = (unsigned long) childregs; + } + + p->thread.reg0 = (unsigned long) childregs; + p->thread.reg3 = (unsigned long) ret_from_fork; + p->thread.cp0_psr = 0; + + return 0; +} + +/* Fill in the fpu structure for a core dump. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) +{ + return 1; +} + +static void __noreturn +kernel_thread_helper(void *unused0, int (*fn)(void *), + void *arg, void *unused1) +{ + do_exit(fn(arg)); +} + +/* + * Create a kernel thread. + */ +long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + + regs.regs[6] = (unsigned long) arg; + regs.regs[5] = (unsigned long) fn; + regs.cp0_epc = (unsigned long) kernel_thread_helper; + regs.cp0_psr = (regs.cp0_psr & ~(0x1|0x4|0x8)) | \ + ((regs.cp0_psr & 0x3) << 2); + + return do_fork(flags | CLONE_VM | CLONE_UNTRACED, \ + 0, ®s, 0, NULL, NULL); +} + +unsigned long thread_saved_pc(struct task_struct *tsk) +{ + return task_pt_regs(tsk)->cp0_epc; +} + +unsigned long get_wchan(struct task_struct *task) +{ + if (!task || task == current || task->state == TASK_RUNNING) + return 0; + + if (!task_stack_page(task)) + return 0; + + return task_pt_regs(task)->cp0_epc; +} + +unsigned long arch_align_stack(unsigned long sp) +{ + return sp; +} diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c new file mode 100644 index 0000000..8fe7209 --- /dev/null +++ b/arch/score/kernel/ptrace.c @@ -0,0 +1,465 @@ +/* + * arch/score/kernel/ptrace.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include + +static int is_16bitinsn(unsigned long insn) +{ + if ((insn & INSN32_MASK) == INSN32_MASK) + return 0; + else + return 1; +} + +int +read_tsk_long(struct task_struct *child, + unsigned long addr, unsigned long *res) +{ + int copied; + + copied = access_process_vm(child, addr, res, sizeof(*res), 0); + + return copied != sizeof(*res) ? 
-EIO : 0; +} + +int +read_tsk_short(struct task_struct *child, + unsigned long addr, unsigned short *res) +{ + int copied; + + copied = access_process_vm(child, addr, res, sizeof(*res), 0); + + return copied != sizeof(*res) ? -EIO : 0; +} + +static int +write_tsk_short(struct task_struct *child, + unsigned long addr, unsigned short val) +{ + int copied; + + copied = access_process_vm(child, addr, &val, sizeof(val), 1); + + return copied != sizeof(val) ? -EIO : 0; +} + +static int +write_tsk_long(struct task_struct *child, + unsigned long addr, unsigned long val) +{ + int copied; + + copied = access_process_vm(child, addr, &val, sizeof(val), 1); + + return copied != sizeof(val) ? -EIO : 0; +} + +void set_single_step(struct task_struct *child) +{ + /* far_epc is the target of branch */ + unsigned int epc, far_epc = 0; + unsigned long epc_insn, far_epc_insn; + int ninsn_type; /* next insn type 0=16b, 1=32b */ + unsigned int tmp, tmp2; + struct pt_regs *regs = task_pt_regs(child); + child->thread.single_step = 1; + child->thread.ss_nextcnt = 1; + epc = regs->cp0_epc; + + read_tsk_long(child, epc, &epc_insn); + + if (is_16bitinsn(epc_insn)) { + if ((epc_insn & J16M) == J16) { + tmp = epc_insn & 0xFFE; + epc = (epc & 0xFFFFF000) | tmp; + } else if ((epc_insn & B16M) == B16) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn & 0xFF) << 1; + tmp = tmp << 23; + tmp = (unsigned int)((int) tmp >> 23); + far_epc = epc + tmp; + epc += 2; + } else if ((epc_insn & BR16M) == BR16) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn >> 4) & 0xF; + far_epc = regs->regs[tmp]; + epc += 2; + } else + epc += 2; + } else { + if ((epc_insn & J32M) == J32) { + tmp = epc_insn & 0x03FFFFFE; + tmp2 = tmp & 0x7FFF; + tmp = (((tmp >> 16) & 0x3FF) << 15) | tmp2; + epc = (epc & 0xFFC00000) | tmp; + } else if ((epc_insn & B32M) == B32) { + child->thread.ss_nextcnt = 2; + tmp = epc_insn & 0x03FFFFFE; /* discard LK bit */ + tmp2 = tmp & 0x3FF; + tmp = (((tmp >> 16) & 0x3FF) << 10) | tmp2; /* 20bit */ + tmp = tmp << 12; + tmp = (unsigned int)((int) tmp >> 12); + far_epc = epc + tmp; + epc += 4; + } else if ((epc_insn & BR32M) == BR32) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn >> 16) & 0x1F; + far_epc = regs->regs[tmp]; + epc += 4; + } else + epc += 4; + } + + if (child->thread.ss_nextcnt == 1) { + read_tsk_long(child, epc, &epc_insn); + + if (is_16bitinsn(epc_insn)) { + write_tsk_short(child, epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn1_type = 0; + child->thread.addr1 = epc; + /* the insn may have 32bit data */ + child->thread.insn1 = (short)epc_insn; + } else { + child->thread.insn1_type = 1; + child->thread.addr1 = epc; + child->thread.insn1 = epc_insn; + } + } else { + /* branch! 
have two target child->thread.ss_nextcnt=2 */ + read_tsk_long(child, epc, &epc_insn); + read_tsk_long(child, far_epc, &far_epc_insn); + if (is_16bitinsn(epc_insn)) { + write_tsk_short(child, epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn1_type = 0; + child->thread.addr1 = epc; + /* the insn may have 32bit data */ + child->thread.insn1 = (short)epc_insn; + } else { + child->thread.insn1_type = 1; + child->thread.addr1 = epc; + child->thread.insn1 = epc_insn; + } + + if (is_16bitinsn(far_epc_insn)) { + write_tsk_short(child, far_epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, far_epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn2_type = 0; + child->thread.addr2 = far_epc; + /* the insn may have 32bit data */ + child->thread.insn2 = (short)far_epc_insn; + } else { + child->thread.insn2_type = 1; + child->thread.addr2 = far_epc; + child->thread.insn2 = far_epc_insn; + } + } +} + +void clear_single_step(struct task_struct *child) +{ + if (child->thread.insn1_type == 0) + write_tsk_short(child, child->thread.addr1, + child->thread.insn1); + + if (child->thread.insn1_type == 1) + write_tsk_long(child, child->thread.addr1, + child->thread.insn1); + + if (child->thread.ss_nextcnt == 2) { /* branch */ + if (child->thread.insn1_type == 0) + write_tsk_short(child, child->thread.addr1, + child->thread.insn1); + if (child->thread.insn1_type == 1) + write_tsk_long(child, child->thread.addr1, + child->thread.insn1); + if (child->thread.insn2_type == 0) + write_tsk_short(child, child->thread.addr2, + child->thread.insn2); + if (child->thread.insn2_type == 1) + write_tsk_long(child, child->thread.addr2, + child->thread.insn2); + } + + child->thread.single_step = 0; + child->thread.ss_nextcnt = 0; +} + + +void ptrace_disable(struct task_struct *child) {} + +long +arch_ptrace(struct task_struct *child, long request, long addr, long data) +{ + int ret; + + if (request == PTRACE_TRACEME) { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + return -EPERM; + + /* set the ptrace bit in the process flags. */ + current->ptrace |= PT_PTRACED; + return 0; + } + + ret = -ESRCH; + if (!child) + return ret; + + ret = -EPERM; + + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + return ret; + } + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) + return ret; + + switch (request) { + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: { + unsigned long tmp; + int copied; + + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + ret = put_user(tmp, (unsigned long *) data); + return ret; + } + + /* Read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: { + struct pt_regs *regs; + unsigned long tmp; + + regs = task_pt_regs(child); + + tmp = 0; /* Default return value. */ + switch (addr) { + case 0 ... 
31: + tmp = regs->regs[addr]; + break; + case PC: + tmp = regs->cp0_epc; + break; + case ECR: + tmp = regs->cp0_ecr; + break; + case EMA: + tmp = regs->cp0_ema; + break; + case CEH: + tmp = regs->ceh; + break; + case CEL: + tmp = regs->cel; + break; + case CONDITION: + tmp = regs->cp0_condition; + break; + case PSR: + tmp = regs->cp0_psr; + break; + case COUNTER: + tmp = regs->sr0; + break; + case LDCR: + tmp = regs->sr1; + break; + case STCR: + tmp = regs->sr2; + break; + default: + tmp = 0; + return -EIO; + } + + ret = put_user(tmp, (unsigned long *) data); + return ret; + } + + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) + == sizeof(data)) + break; + ret = -EIO; + return ret; + + case PTRACE_POKEUSR: { + struct pt_regs *regs; + ret = 0; + regs = task_pt_regs(child); + + switch (addr) { + case 0 ... 31: + regs->regs[addr] = data; + break; + case PC: + regs->cp0_epc = data; + break; + case CEH: + regs->ceh = data; + break; + case CEL: + regs->cel = data; + break; + case CONDITION: + regs->cp0_condition = data; + break; + case PSR: + case COUNTER: + case STCR: + case LDCR: + break; /* user can't write the reg */ + default: + /* The rest are not allowed. */ + ret = -EIO; + break; + } + break; + } + + case PTRACE_SYSCALL: /* continue and stop at next + (return from) syscall. */ + case PTRACE_CONT: { /* restart after signal. */ + ret = -EIO; + if (!valid_signal(data)) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + + child->exit_code = data; + wake_up_process(child); + ret = 0; + break; + } + + /* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + ret = 0; + if (child->state == EXIT_ZOMBIE) /* already dead. */ + break; + child->exit_code = SIGKILL; + clear_single_step(child); + wake_up_process(child); + break; + + case PTRACE_SINGLESTEP: { /* set the trap flag. */ + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + set_single_step(child); + child->exit_code = data; + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + } + + case PTRACE_DETACH: /* detach a process that was attached. */ + ret = ptrace_detach(child, data); + break; + + case PTRACE_SETOPTIONS: + if (data & PTRACE_O_TRACESYSGOOD) + child->ptrace |= PT_TRACESYSGOOD; + else + child->ptrace &= ~PT_TRACESYSGOOD; + ret = 0; + break; + + default: + ret = -EIO; + break; + } + + return ret; +} + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) +{ + if (!(current->ptrace & PT_PTRACED)) + return; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery. */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? + 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c new file mode 100644 index 0000000..a172ce1 --- /dev/null +++ b/arch/score/kernel/setup.c @@ -0,0 +1,157 @@ +/* + * arch/score/kernel/setup.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include + +struct screen_info screen_info; +unsigned long kernelsp; + +static char command_line[COMMAND_LINE_SIZE]; +static struct resource code_resource = { .name = "Kernel code",}; +static struct resource data_resource = { .name = "Kernel data",}; + +static void __init bootmem_init(void) +{ + unsigned long reserved_end, bootmap_size; + unsigned long size = initrd_end - initrd_start; + + reserved_end = (unsigned long)_end; + + min_low_pfn = 0; + max_low_pfn = MEM_SIZE / PAGE_SIZE; + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem_node(NODE_DATA(0), reserved_end, + min_low_pfn, max_low_pfn); + add_active_range(0, min_low_pfn, max_low_pfn); + + free_bootmem(PFN_PHYS(reserved_end), + (max_low_pfn - reserved_end) << PAGE_SHIFT); + memory_present(0, reserved_end, max_low_pfn); + + /* Reserve space for the bootmem bitmap. */ + reserve_bootmem(PFN_PHYS(reserved_end), bootmap_size, BOOTMEM_DEFAULT); + + if (size == 0) { + printk(KERN_INFO "Initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + printk(KERN_ERR "Initrd extends beyond end of memory"); + goto disable; + } + + /* Reserve space for the initrd bitmap. 
*/ + reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); + initrd_below_start_ok = 1; + + pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", + initrd_start, size); + return; +disable: + printk(KERN_CONT " - disabling initrd\n"); + initrd_start = 0; + initrd_end = 0; +} + +static void __init resource_init(void) +{ + struct resource *res; + + code_resource.start = (unsigned long)_text; + code_resource.end = (unsigned long)_etext - 1; + data_resource.start = (unsigned long)_etext; + data_resource.end = (unsigned long)_edata - 1; + + res = alloc_bootmem(sizeof(struct resource)); + res->name = "System RAM"; + res->start = 0; + res->end = MEM_SIZE - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + request_resource(&iomem_resource, res); + + request_resource(res, &code_resource); + request_resource(res, &data_resource); +} + +void __init setup_arch(char **cmdline_p) +{ + randomize_va_space = 0; + *cmdline_p = command_line; + + cpu_cache_init(); + tlb_init(); + bootmem_init(); + paging_init(); + resource_init(); +} + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned long n = (unsigned long) v - 1; + + seq_printf(m, "processor\t\t: %ld\n", n); + seq_printf(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + unsigned long i = *pos; + + return i < 1 ? (void *) (i + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +static int __init topology_init(void) +{ + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c new file mode 100644 index 0000000..b4ed1b3 --- /dev/null +++ b/arch/score/kernel/signal.c @@ -0,0 +1,355 @@ +/* + * arch/score/kernel/signal.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +struct rt_sigframe { + u32 rs_ass[4]; /* argument save space */ + u32 rs_code[2]; /* signal trampoline */ + struct siginfo rs_info; + struct ucontext rs_uc; +}; + +int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + unsigned long reg; + + reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc); + err |= __put_user(regs->cp0_psr, &sc->sc_psr); + err |= __put_user(regs->cp0_condition, &sc->sc_condition); + + +#define save_gp_reg(i) { \ + reg = regs->regs[i]; \ + err |= __put_user(reg, &sc->sc_regs[i]); \ +} while (0) + save_gp_reg(0); save_gp_reg(1); save_gp_reg(2); + save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); + save_gp_reg(6); save_gp_reg(7); save_gp_reg(8); + save_gp_reg(9); save_gp_reg(10); save_gp_reg(11); + save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); + save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); + save_gp_reg(18); save_gp_reg(19); save_gp_reg(20); + save_gp_reg(21); save_gp_reg(22); save_gp_reg(23); + save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); + save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); +#undef save_gp_reg + + reg = regs->ceh; err |= __put_user(reg, &sc->sc_mdceh); + reg = regs->cel; err |= __put_user(reg, &sc->sc_mdcel); + err |= __put_user(regs->cp0_ecr, &sc->sc_ecr); + err |= __put_user(regs->cp0_ema, &sc->sc_ema); + + return err; +} + +int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + u32 reg; + + err |= __get_user(regs->cp0_epc, &sc->sc_pc); + err |= __get_user(regs->cp0_condition, &sc->sc_condition); + + err |= __get_user(reg, &sc->sc_mdceh); + regs->ceh = (int) reg; + err |= __get_user(reg, &sc->sc_mdcel); + regs->cel = (int) reg; + + err |= __get_user(reg, &sc->sc_psr); + regs->cp0_psr = (int) reg; + err |= __get_user(reg, &sc->sc_ecr); + regs->cp0_ecr = (int) reg; + err |= __get_user(reg, &sc->sc_ema); + regs->cp0_ema = (int) reg; + +#define restore_gp_reg(i) do { \ + err |= __get_user(reg, &sc->sc_regs[i]); \ + regs->regs[i] = reg; \ +} while (0) + restore_gp_reg(0); restore_gp_reg(1); restore_gp_reg(2); + restore_gp_reg(3); restore_gp_reg(4); restore_gp_reg(5); + restore_gp_reg(6); restore_gp_reg(7); restore_gp_reg(8); + restore_gp_reg(9); restore_gp_reg(10); restore_gp_reg(11); + restore_gp_reg(12); restore_gp_reg(13); restore_gp_reg(14); + restore_gp_reg(15); restore_gp_reg(16); restore_gp_reg(17); + restore_gp_reg(18); restore_gp_reg(19); restore_gp_reg(20); + restore_gp_reg(21); restore_gp_reg(22); restore_gp_reg(23); + restore_gp_reg(24); restore_gp_reg(25); restore_gp_reg(26); + restore_gp_reg(27); restore_gp_reg(28); restore_gp_reg(29); +#undef restore_gp_reg + + return err; +} + +/* + * Determine which stack to use.. + */ +void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = regs->regs[0]; + sp -= 32; + + /* This is the X/Open sanctioned signal stack switching. 
*/ + if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) + sp = current->sas_ss_sp + current->sas_ss_size; + + return (void *)((sp - frame_size) & ~7); +} + +asmlinkage int score_sigaltstack(struct pt_regs *regs) +{ + const stack_t *uss = (const stack_t *) regs->regs[4]; + stack_t *uoss = (stack_t *) regs->regs[5]; + unsigned long usp = regs->regs[0]; + + return do_sigaltstack(uss, uoss, usp); +} + +asmlinkage void score_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + sigset_t set; + stack_t st; + int sig; + + frame = (struct rt_sigframe __user *) regs->regs[0]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) + goto badframe; + + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); + + __asm__ __volatile__( + "mv\tr0, %0\n\t" + "la\tr8, syscall_exit\n\t" + "br\tr8\n\t" + : : "r" (regs) : "r8"); + +badframe: + force_sig(SIGSEGV, current); +} + +int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, + int signr, sigset_t *set, siginfo_t *info) +{ + struct rt_sigframe *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + /* + * Set up the return code ... 
+ * + * li v0, __NR_rt_sigreturn + * syscall + */ + err |= __put_user(0x87788000 + __NR_rt_sigreturn*2, + frame->rs_code + 0); + err |= __put_user(0x80008002, frame->rs_code + 1); + flush_cache_sigtramp((unsigned long) frame->rs_code); + + err |= copy_siginfo_to_user(&frame->rs_info, info); + err |= __put_user(0, &frame->rs_uc.uc_flags); + err |= __put_user(0, &frame->rs_uc.uc_link); + err |= __put_user((void *)current->sas_ss_sp, + &frame->rs_uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[0]), + &frame->rs_uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, + &frame->rs_uc.uc_stack.ss_size); + err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); + err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); + + if (err) + goto give_sigsegv; + + regs->regs[0] = (unsigned long) frame; + regs->regs[3] = (unsigned long) frame->rs_code; + regs->regs[4] = signr; + regs->regs[5] = (unsigned long) &frame->rs_info; + regs->regs[6] = (unsigned long) &frame->rs_uc; + regs->regs[29] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = (unsigned long) ka->sa.sa_handler; + + return 0; + +give_sigsegv: + if (signr == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); + return -EFAULT; +} + +int handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) +{ + int ret; + + if (regs->is_syscall) { + switch (regs->regs[4]) { + case ERESTART_RESTARTBLOCK: + case ERESTARTNOHAND: + regs->regs[4] = EINTR; + break; + case ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->regs[4] = EINTR; + break; + } + case ERESTARTNOINTR: + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + regs->is_syscall = 0; + } + + /* + * Set up the stack frame + */ + ret = setup_rt_frame(ka, regs, sig, oldset, info); + + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(¤t->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + return ret; +} + +asmlinkage void do_signal(struct pt_regs *regs) +{ + struct k_sigaction ka; + sigset_t *oldset; + siginfo_t info; + int signr; + + /* + * We want the common case to go fast, which is why we may in certain + * cases get here from kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return; + + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else + oldset = ¤t->blocked; + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + + return; + } + + if (regs->is_syscall) { + if (regs->regs[4] == ERESTARTNOHAND || + regs->regs[4] == ERESTARTSYS || + regs->regs[4] == ERESTARTNOINTR) { + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + if (regs->regs[4] == ERESTART_RESTARTBLOCK) { + regs->regs[27] = __NR_restart_syscall; + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + regs->is_syscall = 0; /* Don't deal with this again. 
*/ + } + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } +} + +/* + * notification of userspace execution resumption + * - triggered by the TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, + __u32 thread_info_flags) +{ + /* deal with pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + do_signal(regs); +} diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c new file mode 100644 index 0000000..6a60d1e --- /dev/null +++ b/arch/score/kernel/sys_score.c @@ -0,0 +1,147 @@ +/* + * arch/score/kernel/syscall.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +unsigned long shm_align_mask = PAGE_SIZE - 1; +EXPORT_SYMBOL(shm_align_mask); + +asmlinkage unsigned long +sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, + unsigned long flags, unsigned long fd, unsigned long pgoff) +{ + int error = -EBADF; + struct file *file = NULL; + + if (pgoff & (~PAGE_MASK >> 12)) + return -EINVAL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + return error; + } + + down_write(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, + pgoff >> (PAGE_SHIFT - 12)); + up_write(¤t->mm->mmap_sem); + + if (file) + fput(file); + + return error; +} + +/* + * Clone a task - this clones the calling program thread. + * This is called indirectly via a small wrapper + */ +asmlinkage int +score_clone(struct pt_regs *regs) +{ + unsigned long clone_flags; + unsigned long newsp; + int __user *parent_tidptr, *child_tidptr; + + clone_flags = regs->regs[4]; + newsp = regs->regs[5]; + if (!newsp) + newsp = regs->regs[0]; + parent_tidptr = (int __user *)regs->regs[6]; + + child_tidptr = NULL; + if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { + int __user *__user *usp = (int __user *__user *)regs->regs[0]; + + if (get_user(child_tidptr, &usp[4])) + return -EFAULT; + } + + return do_fork(clone_flags, newsp, regs, 0, + parent_tidptr, child_tidptr); +} + +/* + * sys_execve() executes a new program. 
+ * This is called indirectly via a small wrapper + */ +asmlinkage int score_execve(struct pt_regs *regs) +{ + int error; + char *filename; + + filename = getname((char *) (long) regs->regs[4]); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + return error; + + error = do_execve(filename, (char **) (long) regs->regs[5], + (char **) (long) regs->regs[6], regs); + + putname(filename); + return error; +} + +/* + * If we ever come here the user sp is bad. Zap the process right away. + * Due to the bad stack signaling wouldn't work. + */ +asmlinkage void bad_stack(void) +{ + do_exit(SIGSEGV); +} + +/* + * Do a system call from kernel instead of calling sys_execve so we + * end up with proper pt_regs. + */ +int kernel_execve(const char *filename, char *const argv[], char *const envp[]) +{ + register unsigned long __r4 asm("r4") = (unsigned long) filename; + register unsigned long __r5 asm("r5") = (unsigned long) argv; + register unsigned long __r6 asm("r6") = (unsigned long) envp; + register unsigned long __r7 asm("r7"); + + __asm__ __volatile__ (" \n" + "ldi r27, %5 \n" + "syscall \n" + "mv %0, r4 \n" + "mv %1, r7 \n" + : "=&r" (__r4), "=r" (__r7) + : "r" (__r4), "r" (__r5), "r" (__r6), "i" (__NR_execve) + : "r8", "r9", "r10", "r11", "r22", "r23", "r24", "r25", + "r26", "r27", "memory"); + + if (__r7 == 0) + return __r4; + + return -__r4; +} diff --git a/arch/score/kernel/time.c b/arch/score/kernel/time.c new file mode 100644 index 0000000..cd66ba3 --- /dev/null +++ b/arch/score/kernel/time.c @@ -0,0 +1,99 @@ +/* + * arch/score/kernel/time.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include + +irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evdev = dev_id; + + /* clear timer interrupt flag */ + outl(1, P_TIMER0_CPP_REG); + evdev->event_handler(evdev); + + return IRQ_HANDLED; +} + +static struct irqaction timer_irq = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED | IRQF_TIMER, + .name = "timer", +}; + +static int score_timer_set_next_event(unsigned long delta, + struct clock_event_device *evdev) +{ + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); + outl(delta, P_TIMER0_PRELOAD); + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); + + return 0; +} + +static void score_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evdev) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); + outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD); + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_UNUSED: + break; + default: + BUG(); + } +} + +static struct clock_event_device score_clockevent = { + .name = "score_clockevent", + .features = CLOCK_EVT_FEAT_PERIODIC, + .shift = 16, + .set_next_event = score_timer_set_next_event, + .set_mode = score_timer_set_mode, +}; + +void __init time_init(void) +{ + timer_irq.dev_id = &score_clockevent; + setup_irq(IRQ_TIMER , &timer_irq); + + /* setup COMPARE clockevent */ + score_clockevent.mult = div_sc(SYSTEM_CLOCK, NSEC_PER_SEC, + score_clockevent.shift); + score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0, + &score_clockevent); + score_clockevent.min_delta_ns = clockevent_delta2ns(50, + &score_clockevent) + 1; + score_clockevent.cpumask = cpumask_of(0); + clockevents_register_device(&score_clockevent); +} diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c new file mode 100644 index 0000000..957ae9e --- /dev/null +++ b/arch/score/kernel/traps.c @@ -0,0 +1,349 @@ +/* + * arch/score/kernel/traps.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include +#include +#include + +unsigned long exception_handlers[32]; + +/* + * The architecture-independent show_stack generator + */ +void show_stack(struct task_struct *task, unsigned long *sp) +{ + int i; + long stackdata; + + sp = sp ? 
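time_init() above programs score_clockevent with a mult/shift pair so the core can turn a delay in nanoseconds into timer cycles with one multiply and one shift. A sketch of that fixed-point arithmetic follows; the 27 MHz clock value is an assumption made only for the example, and mult_for() simply spells out what div_sc() computes.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* What div_sc() computes: mult = (freq << shift) / NSEC_PER_SEC. */
static uint32_t mult_for(uint64_t freq_hz, int shift)
{
	return (uint32_t)((freq_hz << shift) / NSEC_PER_SEC);
}

/* How a delay in ns becomes the cycle count handed to set_next_event(). */
static uint64_t ns_to_cycles(uint64_t ns, uint32_t mult, int shift)
{
	return (ns * mult) >> shift;
}

int main(void)
{
	const uint64_t freq = 27000000;		/* assumed timer clock */
	const int shift = 16;			/* score_clockevent.shift */
	uint32_t mult = mult_for(freq, shift);

	/* 1 ms should come out close to 27000 cycles at 27 MHz. */
	printf("mult=%u, 1ms -> %llu cycles\n", mult,
	       (unsigned long long)ns_to_cycles(1000000, mult, shift));
	return 0;
}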
sp : (unsigned long *)&sp; + + printk(KERN_NOTICE "Stack: "); + i = 1; + while ((long) sp & (PAGE_SIZE - 1)) { + if (i && ((i % 8) == 0)) + printk(KERN_NOTICE "\n"); + if (i > 40) { + printk(KERN_NOTICE " ..."); + break; + } + + if (__get_user(stackdata, sp++)) { + printk(KERN_NOTICE " (Bad stack address)"); + break; + } + + printk(KERN_NOTICE " %08lx", stackdata); + i++; + } + printk(KERN_NOTICE "\n"); +} + +static void show_trace(long *sp) +{ + int i; + long addr; + + sp = sp ? sp : (long *) &sp; + + printk(KERN_NOTICE "Call Trace: "); + i = 1; + while ((long) sp & (PAGE_SIZE - 1)) { + if (__get_user(addr, sp++)) { + if (i && ((i % 6) == 0)) + printk(KERN_NOTICE "\n"); + printk(KERN_NOTICE " (Bad stack address)\n"); + break; + } + + if (kernel_text_address(addr)) { + if (i && ((i % 6) == 0)) + printk(KERN_NOTICE "\n"); + if (i > 40) { + printk(KERN_NOTICE " ..."); + break; + } + + printk(KERN_NOTICE " [<%08lx>]", addr); + i++; + } + } + printk(KERN_NOTICE "\n"); +} + +static void show_code(unsigned int *pc) +{ + long i; + + printk(KERN_NOTICE "\nCode:"); + + for (i = -3; i < 6; i++) { + unsigned long insn; + if (__get_user(insn, pc + i)) { + printk(KERN_NOTICE " (Bad address in epc)\n"); + break; + } + printk(KERN_NOTICE "%c%08lx%c", (i ? ' ' : '<'), + insn, (i ? ' ' : '>')); + } +} + +/* + * FIXME: really the generic show_regs should take a const pointer argument. + */ +void show_regs(struct pt_regs *regs) +{ + printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + printk("r8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], + regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); + printk("r16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], + regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); + printk("r24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[24], regs->regs[25], regs->regs[26], regs->regs[27], + regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); + + printk("CEH : %08lx\n", regs->ceh); + printk("CEL : %08lx\n", regs->cel); + + printk("EMA:%08lx, epc:%08lx %s\nPSR: %08lx\nECR:%08lx\nCondition : %08lx\n", + regs->cp0_ema, regs->cp0_epc, print_tainted(), regs->cp0_psr, + regs->cp0_ecr, regs->cp0_condition); +} + +static void show_registers(struct pt_regs *regs) +{ + show_regs(regs); + printk(KERN_NOTICE "Process %s (pid: %d, stackpage=%08lx)\n", + current->comm, current->pid, (unsigned long) current); + show_stack(current_thread_info()->task, (long *) regs->regs[0]); + show_trace((long *) regs->regs[0]); + show_code((unsigned int *) regs->cp0_epc); + printk(KERN_NOTICE "\n"); +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + show_stack(current_thread_info()->task, + (long *) get_irq_regs()->regs[0]); +} +EXPORT_SYMBOL(dump_stack); + +void __die(const char *str, struct pt_regs *regs, const char *file, + const char *func, unsigned long line) +{ + console_verbose(); + printk("%s", str); + if (file && func) + printk(" in %s:%s, line %ld", file, func, line); + printk(":\n"); + show_registers(regs); + do_exit(SIGSEGV); +} + +void __die_if_kernel(const char *str, struct pt_regs *regs, + const char *file, const char *func, unsigned long line) +{ + if (!user_mode(regs)) + __die(str, regs, file, func, line); +} + 
+asmlinkage void do_adelinsn(struct pt_regs *regs) +{ + printk("do_ADE-linsn:ema:0x%08lx:epc:0x%08lx\n", + regs->cp0_ema, regs->cp0_epc); + die_if_kernel("do_ade execution Exception\n", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_adedata(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + fixup = search_exception_tables(regs->cp0_epc); + if (fixup) { + regs->cp0_epc = fixup->fixup; + return; + } + printk("do_ADE-data:ema:0x%08lx:epc:0x%08lx\n", + regs->cp0_ema, regs->cp0_epc); + die_if_kernel("do_ade execution Exception\n", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_pel(struct pt_regs *regs) +{ + die_if_kernel("do_pel execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_cee(struct pt_regs *regs) +{ + die_if_kernel("do_cee execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_cpe(struct pt_regs *regs) +{ + die_if_kernel("do_cpe execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_be(struct pt_regs *regs) +{ + die_if_kernel("do_be execution Exception", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_ov(struct pt_regs *regs) +{ + siginfo_t info; + + die_if_kernel("do_ov execution Exception", regs); + + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_addr = (void *)regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); +} + +asmlinkage void do_tr(struct pt_regs *regs) +{ + die_if_kernel("do_tr execution Exception", regs); + force_sig(SIGTRAP, current); +} + +asmlinkage void do_ri(struct pt_regs *regs) +{ + unsigned long epc_insn; + unsigned long epc = regs->cp0_epc; + + read_tsk_long(current, epc, &epc_insn); + if (current->thread.single_step == 1) { + if ((epc == current->thread.addr1) || + (epc == current->thread.addr2)) { + clear_single_step(current); + force_sig(SIGTRAP, current); + return; + } else + BUG(); + } else if ((epc_insn == BREAKPOINT32_INSN) || + ((epc_insn & 0x0000FFFF) == 0x7002) || + ((epc_insn & 0xFFFF0000) == 0x70020000)) { + force_sig(SIGTRAP, current); + return; + } else { + die_if_kernel("do_ri execution Exception", regs); + force_sig(SIGILL, current); + } +} + +asmlinkage void do_ccu(struct pt_regs *regs) +{ + die_if_kernel("do_ccu execution Exception", regs); + force_sig(SIGILL, current); +} + +asmlinkage void do_reserved(struct pt_regs *regs) +{ + /* + * Game over - no way to handle this if it ever occurs. Most probably + * caused by a new unknown cpu type or after another deadly + * hard/software error. + */ + die_if_kernel("do_reserved execution Exception", regs); + show_regs(regs); + panic("Caught reserved exception - should not happen."); +} + +/* + * NMI exception handler. 
+ */ +void nmi_exception_handler(struct pt_regs *regs) +{ + die_if_kernel("nmi_exception_handler execution Exception", regs); + die("NMI", regs); +} + +/* Install CPU exception handler */ +void *set_except_vector(int n, void *addr) +{ + unsigned long handler = (unsigned long) addr; + unsigned long old_handler = exception_handlers[n]; + + exception_handlers[n] = handler; + return (void *)old_handler; +} + +void __init trap_init(void) +{ + int i; + + pgd_current = (unsigned long)init_mm.pgd; + /* DEBUG EXCEPTION */ + memcpy((void *)DEBUG_VECTOR_BASE_ADDR, + &debug_exception_vector, DEBUG_VECTOR_SIZE); + /* NMI EXCEPTION */ + memcpy((void *)GENERAL_VECTOR_BASE_ADDR, + &general_exception_vector, GENERAL_VECTOR_SIZE); + + /* + * Initialise exception handlers + */ + for (i = 0; i <= 31; i++) + set_except_vector(i, handle_reserved); + + set_except_vector(1, handle_nmi); + set_except_vector(2, handle_adelinsn); + set_except_vector(3, handle_tlb_refill); + set_except_vector(4, handle_tlb_invaild); + set_except_vector(5, handle_ibe); + set_except_vector(6, handle_pel); + set_except_vector(7, handle_sys); + set_except_vector(8, handle_ccu); + set_except_vector(9, handle_ri); + set_except_vector(10, handle_tr); + set_except_vector(11, handle_adedata); + set_except_vector(12, handle_adedata); + set_except_vector(13, handle_tlb_refill); + set_except_vector(14, handle_tlb_invaild); + set_except_vector(15, handle_mod); + set_except_vector(16, handle_cee); + set_except_vector(17, handle_cpe); + set_except_vector(18, handle_dbe); + flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR); + + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + cpu_cache_init(); +} diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S new file mode 100644 index 0000000..f855698 --- /dev/null +++ b/arch/score/kernel/vmlinux.lds.S @@ -0,0 +1,148 @@ +/* + * arch/score/kernel/vmlinux.lds.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +OUTPUT_ARCH(score) +ENTRY(_stext) + +jiffies = jiffies_64; + +SECTIONS +{ + . = CONFIG_MEMORY_START + 0x2000; + /* read-only */ + .text : { + _text = .; /* Text and read-only data */ + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.text.*) + *(.fixup) + . = ALIGN (4) ; + _etext = .; /* End of text section */ + } + + . = ALIGN(16); + RODATA + + /* Exception table */ + . 
= ALIGN(16); + __ex_table : { + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } + + /* writeable */ + .data ALIGN (4096): { + *(.data.init_task) + + DATA_DATA + CONSTRUCTORS + } + + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + . = ALIGN(8); + .sdata : { + *(.sdata) + } + + . = ALIGN(32); + .data.cacheline_aligned : { + *(.data.cacheline_aligned) + } + _edata = .; /* End of data section */ + + /* will be freed after init */ + . = ALIGN(4096); /* Init code and data */ + __init_begin = .; + + . = ALIGN(4096); + .init.text : { + _sinittext = .; + INIT_TEXT + _einittext = .; + } + .init.data : { + INIT_DATA + } + . = ALIGN(16); + .init.setup : { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + + .initcall.init : { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + + .con_initcall.init : { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + SECURITY_INIT + + /* .exit.text is discarded at runtime, not link time, to deal with + * references from .rodata + */ + .exit.text : { + EXIT_TEXT + } + .exit.data : { + EXIT_DATA + } +#if defined(CONFIG_BLK_DEV_INITRD) + .init.ramfs ALIGN(4096): { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + . = ALIGN(4); + LONG(0); + } +#endif + . = ALIGN(4096); + __init_end = .; + /* freed after init ends here */ + + __bss_start = .; /* BSS */ + .sbss : { + *(.sbss) + *(.scommon) + } + .bss : { + *(.bss) + *(COMMON) + } + __bss_stop = .; + _end = .; +} diff --git a/arch/score/lib/Makefile b/arch/score/lib/Makefile new file mode 100644 index 0000000..553e30e --- /dev/null +++ b/arch/score/lib/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for SCORE-specific library files.. +# + +lib-y += string.o checksum.o checksum_copy.o + +# libgcc-style stuff needed in the kernel +obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o diff --git a/arch/score/lib/ashldi3.c b/arch/score/lib/ashldi3.c new file mode 100644 index 0000000..15691a9 --- /dev/null +++ b/arch/score/lib/ashldi3.c @@ -0,0 +1,46 @@ +/* + * arch/score/lib/ashldi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +long long __ashldi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.low = 0; + w.s.high = (unsigned int) uu.s.low << -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.low >> bm; + + w.s.low = (unsigned int) uu.s.low << b; + w.s.high = ((unsigned int) uu.s.high << b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/score/lib/ashrdi3.c b/arch/score/lib/ashrdi3.c new file mode 100644 index 0000000..d9814a5 --- /dev/null +++ b/arch/score/lib/ashrdi3.c @@ -0,0 +1,48 @@ +/* + * arch/score/lib/ashrdi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +long long __ashrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = + uu.s.high >> 31; + w.s.low = uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/score/lib/checksum.S b/arch/score/lib/checksum.S new file mode 100644 index 0000000..706157e --- /dev/null +++ b/arch/score/lib/checksum.S @@ -0,0 +1,255 @@ +/* + * arch/score/lib/csum_partial.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
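__ashldi3() above splits the 64-bit operand into two 32-bit words and shifts them with an explicit carry; __ashrdi3() here and __lshrdi3() later do the mirror image. The following sketch restates the left-shift case and checks it against the compiler's native 64-bit shift; it assumes a little-endian host so the low/high union layout matches libgcc.h.

#include <assert.h>
#include <stdio.h>

struct dw { int low, high; };		/* word order as in libgcc.h (little endian) */
union dwu { struct dw s; long long ll; };

static long long ashldi3_ref(long long u, int b)
{
	union dwu uu, w;
	int bm;

	if (b == 0)
		return u;
	uu.ll = u;
	bm = 32 - b;
	if (bm <= 0) {				/* shift of 32 or more: low word vanishes */
		w.s.low = 0;
		w.s.high = (unsigned int)uu.s.low << -bm;
	} else {				/* bits that cross the word boundary */
		const unsigned int carries = (unsigned int)uu.s.low >> bm;

		w.s.low = (unsigned int)uu.s.low << b;
		w.s.high = ((unsigned int)uu.s.high << b) | carries;
	}
	return w.ll;
}

int main(void)
{
	const long long v = 0x0123456789abcdefLL;
	int b;

	for (b = 0; b < 64; b++)
		assert((unsigned long long)ashldi3_ref(v, b) ==
		       ((unsigned long long)v << b));
	printf("all 64 shift counts match\n");
	return 0;
}

The same "bm = 32 - b" test decides, in all three helpers, whether any bits cross the 32-bit word boundary.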
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include + +#define ADDC(sum,reg) \ + add sum, sum, reg; \ + cmp.c reg, sum; \ + bleu 9f; \ + addi sum, 0x1; \ +9: + +#define CSUM_BIGCHUNK(src, offset, sum) \ + lw r8, [src, offset + 0x00]; \ + lw r9, [src, offset + 0x04]; \ + lw r10, [src, offset + 0x08]; \ + lw r11, [src, offset + 0x0c]; \ + ADDC(sum, r8); \ + ADDC(sum, r9); \ + ADDC(sum, r10); \ + ADDC(sum, r11); \ + lw r8, [src, offset + 0x10]; \ + lw r9, [src, offset + 0x14]; \ + lw r10, [src, offset + 0x18]; \ + lw r11, [src, offset + 0x1c]; \ + ADDC(sum, r8); \ + ADDC(sum, r9); \ + ADDC(sum, r10); \ + ADDC(sum, r11); \ + +#define src r4 +#define dest r5 +#define sum r27 + + .text +/* unknown src alignment and < 8 bytes to go */ +small_csumcpy: + mv r5, r10 + ldi r9, 0x0 + cmpi.c r25, 0x1 + beq pass_small_set_t7 /*already set, jump to pass_small_set_t7*/ + andri.c r25,r4 , 0x1 /*Is src 2 bytes aligned?*/ + +pass_small_set_t7: + beq aligned + cmpi.c r5, 0x0 + beq fold + lbu r9, [src] + slli r9,r9, 0x8 /*Little endian*/ + ADDC(sum, r9) + addi src, 0x1 + subi.c r5, 0x1 + + /*len still a full word */ +aligned: + andri.c r8, r5, 0x4 /*Len >= 4?*/ + beq len_less_4bytes + + /* Still a full word (4byte) to go,and the src is word aligned.*/ + andri.c r8, src, 0x3 /*src is 4bytes aligned, so use LW!!*/ + beq four_byte_aligned + lhu r9, [src] + addi src, 2 + ADDC(sum, r9) + lhu r9, [src] + addi src, 2 + ADDC(sum, r9) + b len_less_4bytes + +four_byte_aligned: /* Len >=4 and four byte aligned */ + lw r9, [src] + addi src, 4 + ADDC(sum, r9) + +len_less_4bytes: /* 2 byte aligned aligned and length<4B */ + andri.c r8, r5, 0x2 + beq len_less_2bytes + lhu r9, [src] + addi src, 0x2 /* src+=2 */ + ADDC(sum, r9) + +len_less_2bytes: /* len = 1 */ + andri.c r8, r5, 0x1 + beq fold /* less than 2 and not equal 1--> len=0 -> fold */ + lbu r9, [src] + +fold_ADDC: + ADDC(sum, r9) +fold: + /* fold checksum */ + slli r26, sum, 16 + add sum, sum, r26 + cmp.c r26, sum + srli sum, sum, 16 + bleu 1f /* if r26<=sum */ + addi sum, 0x1 /* r26>sum */ +1: + /* odd buffer alignment? r25 was set in csum_partial */ + cmpi.c r25, 0x0 + beq 1f + slli r26, sum, 8 + srli sum, sum, 8 + or sum, sum, r26 + andi sum, 0xffff +1: + .set optimize + /* Add the passed partial csum. */ + ADDC(sum, r6) + mv r4, sum + br r3 + .set volatile + + .align 5 +ENTRY(csum_partial) + ldi sum, 0 + ldi r25, 0 + mv r10, r5 + cmpi.c r5, 0x8 + blt small_csumcpy /* < 8(singed) bytes to copy */ + cmpi.c r5, 0x0 + beq out + andri.c r25, src, 0x1 /* odd buffer? */ + + beq word_align +hword_align: /* 1 byte */ + lbu r8, [src] + subi r5, 0x1 + slli r8, r8, 8 + ADDC(sum, r8) + addi src, 0x1 + +word_align: /* 2 bytes */ + andri.c r8, src, 0x2 /* 4bytes(dword)_aligned? */ + beq dword_align /* not, maybe dword_align */ + lhu r8, [src] + subi r5, 0x2 + ADDC(sum, r8) + addi src, 0x2 + +dword_align: /* 4bytes */ + mv r26, r5 /* maybe useless when len >=56 */ + ldi r8, 56 + cmp.c r8, r5 + bgtu do_end_words /* if a1(len)=128? 
*/ + beq 1f /* len<128 */ + +/* r26 is the result that computed in oword_align */ +move_128bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + CSUM_BIGCHUNK(src, 0x20, sum) + CSUM_BIGCHUNK(src, 0x40, sum) + CSUM_BIGCHUNK(src, 0x60, sum) + subi.c r26, 0x01 /* r26 equals len/128 */ + addi src, 0x80 + bne move_128bytes + +1: /* len<128,we process 64byte here */ + andri.c r10, r5, 0x40 + beq 1f + +move_64bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + CSUM_BIGCHUNK(src, 0x20, sum) + addi src, 0x40 + +1: /* len<64 */ + andri r26, r5, 0x1c /* 0x1c=28 */ + andri.c r10, r5, 0x20 + beq do_end_words /* decided by andri */ + +move_32bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + andri r26, r5, 0x1c + addri src, src, 0x20 + +do_end_words: /* len<32 */ + /* r26 was set already in dword_align */ + cmpi.c r26, 0x0 + beq maybe_end_cruft /* len<28 or len<56 */ + srli r26, r26, 0x2 + +end_words: + lw r8, [src] + subi.c r26, 0x1 /* unit is 4 byte */ + ADDC(sum, r8) + addi src, 0x4 + cmpi.c r26, 0x0 + bne end_words /* r26!=0 */ + +maybe_end_cruft: /* len<4 */ + andri r10, r5, 0x3 + +small_memcpy: + mv r5, r10 + j small_csumcpy + +out: + mv r4, sum + br r3 + +END(csum_partial) diff --git a/arch/score/lib/checksum_copy.c b/arch/score/lib/checksum_copy.c new file mode 100644 index 0000000..04565dd --- /dev/null +++ b/arch/score/lib/checksum_copy.c @@ -0,0 +1,52 @@ +/* + * arch/score/lib/csum_partial_copy.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include + +unsigned int csum_partial_copy(const char *src, char *dst, + int len, unsigned int sum) +{ + sum = csum_partial(src, len, sum); + memcpy(dst, src, len); + + return sum; +} + +unsigned int csum_partial_copy_from_user(const char *src, char *dst, + int len, unsigned int sum, + int *err_ptr) +{ + int missing; + + missing = copy_from_user(dst, src, len); + if (missing) { + memset(dst + len - missing, 0, missing); + *err_ptr = -EFAULT; + } + + return csum_partial(dst, len, sum); +} diff --git a/arch/score/lib/cmpdi2.c b/arch/score/lib/cmpdi2.c new file mode 100644 index 0000000..1ed5290 --- /dev/null +++ b/arch/score/lib/cmpdi2.c @@ -0,0 +1,44 @@ +/* + * arch/score/lib/cmpdi2.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
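The ADDC macro and the fold: sequence in checksum.S above implement the usual Internet-checksum arithmetic: 32-bit additions with the carry wrapped back in, then the accumulator folded to 16 bits. A plain C reference of those two steps follows, for illustration only; the data words are arbitrary and this is not the kernel interface.

#include <stdio.h>
#include <stdint.h>

/* sum += v with end-around carry, as ADDC(sum, reg) does. */
static uint32_t addc(uint32_t sum, uint32_t v)
{
	sum += v;
	if (sum < v)			/* carry out of bit 31 */
		sum += 1;
	return sum;
}

/* Fold the 32-bit accumulator to 16 bits, as the fold: label does. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry of the first pass */
	return (uint16_t)sum;
}

int main(void)
{
	const uint32_t words[] = { 0x45000034, 0x12344000, 0x40110000, 0x0a000001 };
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = addc(sum, words[i]);
	printf("folded sum: 0x%04x\n", csum_fold32(sum));
	return 0;
}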
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +word_type __cmpdi2(long long a, long long b) +{ + const DWunion au = { + .ll = a + }; + const DWunion bu = { + .ll = b + }; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + + return 1; +} +EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/score/lib/libgcc.h b/arch/score/lib/libgcc.h new file mode 100644 index 0000000..0f12543 --- /dev/null +++ b/arch/score/lib/libgcc.h @@ -0,0 +1,37 @@ +/* + * arch/score/lib/libgcc.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include + +typedef int word_type __attribute__((mode(__word__))); + +struct DWstruct { + int low, high; +}; + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/score/lib/lshrdi3.c b/arch/score/lib/lshrdi3.c new file mode 100644 index 0000000..ce21175 --- /dev/null +++ b/arch/score/lib/lshrdi3.c @@ -0,0 +1,47 @@ +/* + * arch/score/lib/lshrdi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
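__cmpdi2() above follows the libgcc convention of returning 0, 1 or 2 for less-than, equal and greater-than; __ucmpdi2() later does the same for unsigned operands. A short reference of that mapping, for illustration only:

#include <assert.h>
#include <stdio.h>

/* 0 -> a < b, 1 -> a == b, 2 -> a > b, matching the __cmpdi2 convention. */
static int cmpdi2_ref(long long a, long long b)
{
	return (a < b) ? 0 : (a > b) ? 2 : 1;
}

int main(void)
{
	assert(cmpdi2_ref(-1, 1) == 0);
	assert(cmpdi2_ref(5, 5) == 1);
	assert(cmpdi2_ref(1LL << 40, 3) == 2);
	printf("0/1/2 convention holds\n");
	return 0;
}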
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#include +#include "libgcc.h" + +long long __lshrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.high = 0; + w.s.low = (unsigned int) uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = (unsigned int) uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S new file mode 100644 index 0000000..943d091 --- /dev/null +++ b/arch/score/lib/string.S @@ -0,0 +1,196 @@ +/* + * arch/score/lib/string.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + + .text + .align 2 +ENTRY(__strncpy_from_user) + cmpi.c r6, 0 + mv r9, r6 + ble .L2 +0: lbu r7, [r5] + ldi r8, 0 +1: sb r7, [r4] +2: lb r6, [r5] + cmp.c r6, r8 + beq .L2 + +.L5: + addi r8, 1 + cmp.c r8, r9 + beq .L7 +3: lbu r6, [r5, 1]+ +4: sb r6, [r4, 1]+ +5: lb r7, [r5] + cmpi.c r7, 0 + bne .L5 +.L7: + mv r4, r8 + br r3 +.L2: + ldi r8, 0 + mv r4, r8 + br r3 + .section .fixup, "ax" +99: + ldi r4, -EFAULT + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b ,99b + .word 1b ,99b + .word 2b ,99b + .word 3b ,99b + .word 4b ,99b + .word 5b ,99b + .previous + + .align 2 +ENTRY(__strnlen_user) + cmpi.c r5, 0 + ble .L11 +0: lb r6, [r4] + ldi r7, 0 + cmp.c r6, r7 + beq .L11 +.L15: + addi r7, 1 + cmp.c r7, r5 + beq .L23 +1: lb r6, [r4,1]+ + cmpi.c r6, 0 + bne .L15 +.L23: + addri r4, r7, 1 + br r3 + +.L11: + ldi r4, 1 + br r3 + .section .fixup, "ax" +99: + ldi r4, 0 + br r3 + + .section __ex_table,"a" + .align 2 + .word 0b, 99b + .word 1b, 99b + .previous + + .align 2 +ENTRY(__strlen_user) +0: lb r6, [r4] + mv r7, r4 + extsb r6, r6 + cmpi.c r6, 0 + mv r4, r6 + beq .L27 +.L28: +1: lb r6, [r7, 1]+ + addi r6, 1 + cmpi.c r6, 0 + bne .L28 +.L27: + br r3 + .section .fixup, "ax" + ldi r4, 0x0 + br r3 +99: + ldi r4, 0 + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b ,99b + .word 1b ,99b + .previous + + .align 2 +ENTRY(__copy_tofrom_user) + cmpi.c r6, 0 + mv r10,r6 + beq .L32 + ldi r9, 0 +.L34: + add r6, r5, r9 +0: lbu r8, [r6] + add r7, r4, r9 +1: sb r8, [r7] + addi r9, 1 + cmp.c r9, r10 + bne .L34 +.L32: + ldi r4, 0 + br r3 + .section .fixup, "ax" +99: + sub r4, r10, r9 + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b, 99b + .word 1b, 99b + 
.previous + + .align 2 +ENTRY(__clear_user) + cmpi.c r5, 0 + beq .L38 + ldi r6, 0 + mv r7, r6 +.L40: + addi r6, 1 +0: sb r7, [r4]+, 1 + cmp.c r6, r5 + bne .L40 +.L38: + ldi r4, 0 + br r3 + + .section .fixup, "ax" + br r3 + .previous + .section __ex_table, "a" + .align 2 +99: + .word 0b, 99b + .previous + + .align 2 +ENTRY(__put_user_unknown) + .set volatile + ldi r4, -EFAULT + br r3 + + .align 2 +ENTRY(__get_user_unknown) + ldi r5, 0 + ldi r4, -EFAULT + br r3 diff --git a/arch/score/lib/ucmpdi2.c b/arch/score/lib/ucmpdi2.c new file mode 100644 index 0000000..b15241e --- /dev/null +++ b/arch/score/lib/ucmpdi2.c @@ -0,0 +1,38 @@ +/* + * arch/score/lib/ucmpdi2.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +word_type __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) + return 0; + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) + return 2; + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + return 1; +} +EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/score/mm/Makefile b/arch/score/mm/Makefile new file mode 100644 index 0000000..7b1e29b --- /dev/null +++ b/arch/score/mm/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/SCORE-specific parts of the memory manager. +# + +obj-y += cache.o extable.o fault.o init.o \ + tlb-miss.o tlb-score.o pgtable.o diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c new file mode 100644 index 0000000..1ebc67f --- /dev/null +++ b/arch/score/mm/cache.c @@ -0,0 +1,308 @@ +/* + * arch/score/mm/cache.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* Cache operations. 
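Every user access in string.S above is paired, through the .fixup and __ex_table sections, with a recovery address: the table records (faulting instruction, fixup), and on a fault the trap path looks the instruction up and resumes at the fixup. A minimal sketch of that lookup follows; the addresses are made up, the field names follow the usual kernel layout, and the kernel's real search is a sorted binary search rather than this linear scan.

#include <stdio.h>

struct extable_entry {
	unsigned long insn;	/* address of the access that may fault */
	unsigned long fixup;	/* where to resume if it does */
};

static const struct extable_entry ex_table[] = {
	{ 0x1000, 0x2000 },	/* e.g. "0: lbu r7, [r5]" -> label 99 */
	{ 0x1004, 0x2000 },	/* e.g. "1: sb  r7, [r4]" -> label 99 */
};

static unsigned long fixup_for(unsigned long epc)
{
	size_t i;

	for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == epc)
			return ex_table[i].fixup;
	return 0;		/* no entry: the fault is fatal */
}

int main(void)
{
	printf("fixup for 0x1004: %#lx\n", fixup_for(0x1004));
	return 0;
}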
*/ +void (*flush_cache_all)(void); +void (*__flush_cache_all)(void); +void (*flush_cache_mm)(struct mm_struct *mm); +void (*flush_cache_range)(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void (*flush_cache_page)(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +void (*flush_icache_range)(unsigned long start, unsigned long end); +void (*__flush_cache_vmap)(void); +void (*__flush_cache_vunmap)(void); +void (*flush_cache_sigtramp)(unsigned long addr); +void (*flush_data_cache_page)(unsigned long addr); +EXPORT_SYMBOL(flush_data_cache_page); +void (*flush_icache_all)(void); + +/*Score 7 cache operations*/ +static inline void s7___flush_cache_all(void); +static void s7_flush_cache_mm(struct mm_struct *mm); +static void s7_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +static void s7_flush_cache_page(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +static void s7_flush_icache_range(unsigned long start, unsigned long end); +static void s7_flush_cache_sigtramp(unsigned long addr); +static void s7_flush_data_cache_page(unsigned long addr); +static void s7_flush_dcache_range(unsigned long start, unsigned long end); + +void __update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte) +{ + struct page *page; + unsigned long pfn, addr; + int exec = (vma->vm_flags & VM_EXEC); + + pfn = pte_pfn(pte); + if (unlikely(!pfn_valid(pfn))) + return; + page = pfn_to_page(pfn); + if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) { + addr = (unsigned long) page_address(page); + if (exec) + s7_flush_data_cache_page(addr); + clear_bit(PG_arch_1, &page->flags); + } +} + +static inline void setup_protection_map(void) +{ + protection_map[0] = PAGE_NONE; + protection_map[1] = PAGE_READONLY; + protection_map[2] = PAGE_COPY; + protection_map[3] = PAGE_COPY; + protection_map[4] = PAGE_READONLY; + protection_map[5] = PAGE_READONLY; + protection_map[6] = PAGE_COPY; + protection_map[7] = PAGE_COPY; + protection_map[8] = PAGE_NONE; + protection_map[9] = PAGE_READONLY; + protection_map[10] = PAGE_SHARED; + protection_map[11] = PAGE_SHARED; + protection_map[12] = PAGE_READONLY; + protection_map[13] = PAGE_READONLY; + protection_map[14] = PAGE_SHARED; + protection_map[15] = PAGE_SHARED; +} + +void __devinit cpu_cache_init(void) +{ + flush_cache_all = s7_flush_cache_all; + __flush_cache_all = s7___flush_cache_all; + flush_cache_mm = s7_flush_cache_mm; + flush_cache_range = s7_flush_cache_range; + flush_cache_page = s7_flush_cache_page; + flush_icache_range = s7_flush_icache_range; + flush_cache_sigtramp = s7_flush_cache_sigtramp; + flush_data_cache_page = s7_flush_data_cache_page; + + setup_protection_map(); +} + +void s7_flush_icache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_icache_all\n" + "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7_flush_dcache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_dcache_all\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7_flush_cache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_cache_all\n" + "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7___flush_cache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_cache_all\n" 
+ "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +static void s7_flush_cache_mm(struct mm_struct *mm) +{ + if (!(mm->context)) + return; + s7_flush_cache_all(); +} + +/*if we flush a range precisely , the processing may be very long. +We must check each page in the range whether present. If the page is present, +we can flush the range in the page. Be careful, the range may be cross two +page, a page is present and another is not present. +*/ +/* +The interface is provided in hopes that the port can find +a suitably efficient method for removing multiple page +sized regions from the cache. +*/ +static void +s7_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int exec = vma->vm_flags & VM_EXEC; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + if (!(mm->context)) + return; + + pgdp = pgd_offset(mm, start); + pudp = pud_offset(pgdp, start); + pmdp = pmd_offset(pudp, start); + ptep = pte_offset(pmdp, start); + + while (start <= end) { + unsigned long tmpend; + pgdp = pgd_offset(mm, start); + pudp = pud_offset(pgdp, start); + pmdp = pmd_offset(pudp, start); + ptep = pte_offset(pmdp, start); + + if (!(pte_val(*ptep) & _PAGE_PRESENT)) { + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); + continue; + } + tmpend = (start | (PAGE_SIZE-1)) > end ? + end : (start | (PAGE_SIZE-1)); + + s7_flush_dcache_range(start, tmpend); + if (exec) + s7_flush_icache_range(start, tmpend); + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); + } +} + +static void +s7_flush_cache_page(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) +{ + int exec = vma->vm_flags & VM_EXEC; + unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); + + s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE); + + if (exec) + s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE); +} + +static void s7_flush_cache_sigtramp(unsigned long addr) +{ + __asm__ __volatile__( + "cache 0x02, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x02, [%0, 0x4]\n" + "nop\nnop\nnop\nnop\nnop\n" + + "cache 0x0d, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x0d, [%0, 0x4]\n" + "nop\nnop\nnop\nnop\nnop\n" + + "cache 0x1a, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (addr)); +} + +/* +Just flush entire Dcache!! +You must ensure the page doesn't include instructions, because +the function will not flush the Icache. +The addr must be cache aligned. +*/ +static void s7_flush_data_cache_page(unsigned long addr) +{ + unsigned int i; + for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "cache 0x1a, [%0, 0]\n" + "nop\n" + : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} + +/* +1. WB and invalid a cache line of Dcache +2. Drain Write Buffer +the range must be smaller than PAGE_SIZE +*/ +static void s7_flush_dcache_range(unsigned long start, unsigned long end) +{ + int size, i; + + start = start & ~(L1_CACHE_BYTES - 1); + end = end & ~(L1_CACHE_BYTES - 1); + size = end - start; + /* flush dcache to ram, and invalidate dcache lines. 
*/ + for (i = 0; i < size; i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (start)); + start += L1_CACHE_BYTES; + } +} + +static void s7_flush_icache_range(unsigned long start, unsigned long end) +{ + int size, i; + start = start & ~(L1_CACHE_BYTES - 1); + end = end & ~(L1_CACHE_BYTES - 1); + + size = end - start; + /* invalidate icache lines. */ + for (i = 0; i < size; i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x02, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (start)); + start += L1_CACHE_BYTES; + } +} diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c new file mode 100644 index 0000000..01ff644 --- /dev/null +++ b/arch/score/mm/extable.c @@ -0,0 +1,38 @@ +/* + * arch/score/mm/extable.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(regs->cp0_epc); + if (fixup) { + regs->cp0_epc = fixup->fixup; + return 1; + } + return 0; +} diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c new file mode 100644 index 0000000..47b600e --- /dev/null +++ b/arch/score/mm/fault.c @@ -0,0 +1,235 @@ +/* + * arch/score/mm/fault.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. 
+ */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long address) +{ + struct vm_area_struct *vma = NULL; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + const int field = sizeof(unsigned long) * 2; + siginfo_t info; + int fault; + + info.si_code = SEGV_MAPERR; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) + goto vmalloc_fault; +#ifdef MODULE_START + if (unlikely(address >= MODULE_START && address < MODULE_END)) + goto vmalloc_fault; +#endif + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_atomic() || !mm) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + info.si_code = SEGV_ACCERR; + + if (write) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + goto bad_area; + } + +survive: + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(mm, vma, address, write); + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + tsk->thread.cp0_badvaddr = address; + tsk->thread.error_code = write; + info.si_signo = SIGSEGV; + info.si_errno = 0; + /* info.si_code has been set above */ + info.si_addr = (void __user *) address; + force_sig_info(SIGSEGV, &info, tsk); + return; + } + +no_context: + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) { + current->thread.cp0_baduaddr = address; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + + printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " + "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n", + 0, field, address, field, regs->cp0_epc, + field, regs->regs[3]); + die("Oops", regs); + + /* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + up_read(&mm->mmap_sem); + if (is_global_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: killing process %s\n", tsk->comm); + if (user_mode(regs)) + do_group_exit(SIGKILL); + goto no_context; + +do_sigbus: + up_read(&mm->mmap_sem); + /* Kernel mode? 
Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; + else + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.cp0_badvaddr = address; + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *) address; + force_sig_info(SIGBUS, &info, tsk); + return; +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + int offset = __pgd_offset(address); + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + pgd = (pgd_t *) pgd_current + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) + goto no_context; + set_pgd(pgd, *pgd_k); + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + return; + } +} diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c new file mode 100644 index 0000000..7780eec --- /dev/null +++ b/arch/score/mm/init.c @@ -0,0 +1,173 @@ +/* + * arch/score/mm/init.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +/* + * We have up to 8 empty zeroed pages so we can map one of the right colour + * when needed. 
+ */ +unsigned long zero_page_mask; +unsigned long empty_zero_page; +EXPORT_SYMBOL_GPL(empty_zero_page); + +static struct kcore_list kcore_mem, kcore_vmalloc; + +unsigned long setup_zero_pages(void) +{ + unsigned int order = 0; + unsigned long size; + struct page *page; + + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!empty_zero_page) + panic("Oh boy, that early out of memory?"); + + page = virt_to_page((void *) empty_zero_page); + split_page(page, order); + while (page < virt_to_page((void *) (empty_zero_page + + (PAGE_SIZE << order)))) { + SetPageReserved(page); + page++; + } + + size = PAGE_SIZE << order; + zero_page_mask = (size - 1) & PAGE_MASK; + + return 1UL << order; +} + +#ifndef CONFIG_NEED_MULTIPLE_NODES +static int __init page_is_ram(unsigned long pagenr) +{ + if (pagenr >= min_low_pfn && pagenr < max_low_pfn) + return 1; + else + return 0; +} + +void __init paging_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + unsigned long lastpfn; + + pagetable_init(); + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + lastpfn = max_low_pfn; + free_area_init_nodes(max_zone_pfns); +} + +void __init mem_init(void) +{ + unsigned long codesize, reservedpages, datasize, initsize; + unsigned long tmp, ram = 0; + + max_mapnr = max_low_pfn; + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + totalram_pages += free_all_bootmem(); + totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + reservedpages = 0; + + for (tmp = 0; tmp < max_low_pfn; tmp++) + if (page_is_ram(tmp)) { + ram++; + if (PageReserved(pfn_to_page(tmp))) + reservedpages++; + } + + num_physpages = ram; + codesize = (unsigned long) &_etext - (unsigned long) &_text; + datasize = (unsigned long) &_edata - (unsigned long) &_etext; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_vmalloc, (void *) VMALLOC_START, + VMALLOC_END - VMALLOC_START); + + printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " + "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + ram << (PAGE_SHIFT-10), codesize >> 10, + reservedpages << (PAGE_SHIFT-10), datasize >> 10, + initsize >> 10, + (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); +} +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ + +void free_init_pages(const char *what, unsigned long begin, unsigned long end) +{ + unsigned long pfn; + + for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { + struct page *page = pfn_to_page(pfn); + void *addr = phys_to_virt(PFN_PHYS(pfn)); + + ClearPageReserved(page); + init_page_count(page); + memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); + __free_page(page); + totalram_pages++; + } + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_init_pages("initrd memory", + virt_to_phys((void *) start), + virt_to_phys((void *) end)); +} +#endif + +void __init_refok free_initmem(void) +{ + free_init_pages("unused kernel memory", + (unsigned long)__init_begin, (unsigned long)__init_end); +} + +unsigned long pgd_current; + +#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE< + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * 
(at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +void pgd_init(unsigned long page) +{ + unsigned long *p = (unsigned long *) page; + int i; + + for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { + p[i + 0] = (unsigned long) invalid_pte_table; + p[i + 1] = (unsigned long) invalid_pte_table; + p[i + 2] = (unsigned long) invalid_pte_table; + p[i + 3] = (unsigned long) invalid_pte_table; + p[i + 4] = (unsigned long) invalid_pte_table; + p[i + 5] = (unsigned long) invalid_pte_table; + p[i + 6] = (unsigned long) invalid_pte_table; + p[i + 7] = (unsigned long) invalid_pte_table; + } +} + +void __init pagetable_init(void) +{ + unsigned long vaddr; + pgd_t *pgd_base; + + /* Initialize the entire pgd. */ + pgd_init((unsigned long) swapper_pg_dir); + pgd_init((unsigned long) swapper_pg_dir + + sizeof(pgd_t) * USER_PTRS_PER_PGD); + + pgd_base = swapper_pg_dir; + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; +} diff --git a/arch/score/mm/tlb-miss.S b/arch/score/mm/tlb-miss.S new file mode 100644 index 0000000..f276519 --- /dev/null +++ b/arch/score/mm/tlb-miss.S @@ -0,0 +1,199 @@ +/* + * arch/score/mm/tlbex.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +/* +* After this macro runs, the pte faulted on is +* in register PTE, a ptr into the table in which +* the pte belongs is in PTR. +*/ + .macro load_pte, pte, ptr + la \ptr, pgd_current + lw \ptr, [\ptr, 0] + mfcr \pte, cr6 + srli \pte, \pte, 22 + slli \pte, \pte, 2 + add \ptr, \ptr, \pte + lw \ptr, [\ptr, 0] + mfcr \pte, cr6 + srli \pte, \pte, 10 + andi \pte, 0xffc + add \ptr, \ptr, \pte + lw \pte, [\ptr, 0] + .endm + + .macro pte_reload, ptr + lw \ptr, [\ptr, 0] + mtcr \ptr, cr12 + nop + nop + nop + nop + nop + .endm + + .macro do_fault, write + SAVE_ALL + mfcr r6, cr6 + mv r4, r0 + ldi r5, \write + la r8, do_page_fault + brl r8 + j ret_from_exception + .endm + + .macro pte_writable, pte, ptr, label + andi \pte, 0x280 + cmpi.c \pte, 0x280 + bne \label + lw \pte, [\ptr, 0] /*reload PTE*/ + .endm + +/* + * Make PTE writable, update software status bits as well, + * then store at PTR. 
+ */ + .macro pte_makewrite, pte, ptr + ori \pte, 0x426 + sw \pte, [\ptr, 0] + .endm + + .text +ENTRY(score7_FTLB_refill_Handler) + la r31, pgd_current /* get pgd pointer */ + lw r31, [r31, 0] /* get the address of PGD */ + mfcr r30, cr6 + srli r30, r30, 22 /* PGDIR_SHIFT = 22*/ + slli r30, r30, 2 + add r31, r31, r30 + lw r31, [r31, 0] /* get the address of the start address of PTE table */ + + mfcr r30, cr9 + andi r30, 0xfff /* equivalent to get PET index and right shift 2 bits */ + add r31, r31, r30 + lw r30, [r31, 0] /* load pte entry */ + mtcr r30, cr12 + nop + nop + nop + nop + nop + mtrtlb + nop + nop + nop + nop + nop + rte /* 6 cycles to make sure tlb entry works */ + +ENTRY(score7_KSEG_refill_Handler) + la r31, pgd_current /* get pgd pointer */ + lw r31, [r31, 0] /* get the address of PGD */ + mfcr r30, cr6 + srli r30, r30, 22 /* PGDIR_SHIFT = 22 */ + slli r30, r30, 2 + add r31, r31, r30 + lw r31, [r31, 0] /* get the address of the start address of PTE table */ + + mfcr r30, cr6 /* get Bad VPN */ + srli r30, r30, 10 + andi r30, 0xffc /* PTE VPN mask (bit 11~2) */ + + add r31, r31, r30 + lw r30, [r31, 0] /* load pte entry */ + mtcr r30, cr12 + nop + nop + nop + nop + nop + mtrtlb + nop + nop + nop + nop + nop + rte /* 6 cycles to make sure tlb entry works */ + +nopage_tlbl: + do_fault 0 /* Read */ + +ENTRY(handle_tlb_refill) + load_pte r30, r31 + pte_writable r30, r31, handle_tlb_refill_nopage + pte_makewrite r30, r31 /* Access|Modify|Dirty|Valid */ + pte_reload r31 + mtrtlb + nop + nop + nop + nop + nop + rte +handle_tlb_refill_nopage: + do_fault 0 /* Read */ + +ENTRY(handle_tlb_invaild) + load_pte r30, r31 + stlb /* find faulting entry */ + pte_writable r30, r31, handle_tlb_invaild_nopage + pte_makewrite r30, r31 /* Access|Modify|Dirty|Valid */ + pte_reload r31 + mtptlb + nop + nop + nop + nop + nop + rte +handle_tlb_invaild_nopage: + do_fault 0 /* Read */ + +ENTRY(handle_mod) + load_pte r30, r31 + stlb /* find faulting entry */ + andi r30, _PAGE_WRITE /* Writable? */ + cmpz.c r30 + beq nowrite_mod + lw r30, [r31, 0] /* reload into r30 */ + + /* Present and writable bits set, set accessed and dirty bits. */ + pte_makewrite r30, r31 + + /* Now reload the entry into the tlb. */ + pte_reload r31 + mtptlb + nop + nop + nop + nop + nop + rte + +nowrite_mod: + do_fault 1 /* Write */ diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c new file mode 100644 index 0000000..4fa5aa5 --- /dev/null +++ b/arch/score/mm/tlb-score.c @@ -0,0 +1,251 @@ +/* + * arch/score/mm/tlb-score.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include +#include +#include + +#define TLBSIZE 32 + +unsigned long asid_cache = ASID_FIRST_VERSION; +EXPORT_SYMBOL(asid_cache); + +void local_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ASID; + int entry; + + local_irq_save(flags); + old_ASID = pevn_get() & ASID_MASK; + pectx_set(0); /* invalid */ + entry = tlblock_get(); /* skip locked entries*/ + + for (; entry < TLBSIZE; entry++) { + tlbpt_set(entry); + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(old_ASID); + local_irq_restore(flags); +} + +/* + * If mm is currently active_mm, we can't really drop it. Instead, + * we will get a new one for it. + */ +static inline void +drop_mmu_context(struct mm_struct *mm) +{ + unsigned long flags; + + local_irq_save(flags); + get_new_mmu_context(mm); + pevn_set(mm->context & ASID_MASK); + local_irq_restore(flags); +} + +void local_flush_tlb_mm(struct mm_struct *mm) +{ + if (mm->context != 0) + drop_mmu_context(mm); +} + +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long vma_mm_context = mm->context; + if (mm->context != 0) { + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size <= TLBSIZE) { + int oldpid = pevn_get() & ASID_MASK; + int newpid = vma_mm_context & ASID_MASK; + + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + while (start < end) { + int idx; + + pevn_set(start | newpid); + start += PAGE_SIZE; + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + pevn_set(KSEG1); + if (idx < 0) + continue; + tlb_write_indexed(); + } + pevn_set(oldpid); + } else { + /* Bigger than TLBSIZE, get new ASID directly */ + get_new_mmu_context(mm); + if (mm == current->active_mm) + pevn_set(vma_mm_context & ASID_MASK); + } + local_irq_restore(flags); + } +} + +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size <= TLBSIZE) { + int pid = pevn_get(); + + start &= PAGE_MASK; + end += PAGE_SIZE - 1; + end &= PAGE_MASK; + + while (start < end) { + long idx; + + pevn_set(start); + start += PAGE_SIZE; + tlb_probe(); + idx = tlbpt_get(); + if (idx < 0) + continue; + pectx_set(0); + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(pid); + } else { + local_flush_tlb_all(); + } + + local_irq_restore(flags); +} + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if (!vma || vma->vm_mm->context != 0) { + unsigned long flags; + int oldpid, newpid, idx; + unsigned long vma_ASID = vma->vm_mm->context; + + newpid = vma_ASID & ASID_MASK; + page &= PAGE_MASK; + local_irq_save(flags); + oldpid = pevn_get() & ASID_MASK; + pevn_set(page | newpid); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + pevn_set(KSEG1); + if (idx < 0) /* p_bit(31) - 1: miss, 0: hit*/ + goto finish; + barrier(); + tlb_write_indexed(); +finish: + pevn_set(oldpid); + local_irq_restore(flags); + } +} + +/* + * This one is only used for pages with the global bit set so we don't care + * much about the ASID. 
+ */ +void local_flush_tlb_one(unsigned long page) +{ + unsigned long flags; + int oldpid, idx; + + local_irq_save(flags); + oldpid = pevn_get(); + page &= (PAGE_MASK << 1); + pevn_set(page); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + if (idx >= 0) { + /* Make sure all entries differ. */ + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(oldpid); + local_irq_restore(flags); +} + +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ + unsigned long flags; + int idx, pid; + + /* + * Handle debugger faulting in for debugee. + */ + if (current->active_mm != vma->vm_mm) + return; + + pid = pevn_get() & ASID_MASK; + + local_irq_save(flags); + address &= PAGE_MASK; + pevn_set(address | pid); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(pte_val(pte)); + pevn_set(address | pid); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + + pevn_set(pid); + local_irq_restore(flags); +} + +void __cpuinit tlb_init(void) +{ + tlblock_set(0); + local_flush_tlb_all(); + memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100), + &score7_FTLB_refill_Handler, 0xFC); + flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100, + EXCEPTION_VECTOR_BASE_ADDR + 0x1FC); +} -- cgit v1.1 From 0402c91af944c61bf788370f03326959a35cb8be Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Fri, 19 Jun 2009 13:53:49 +0800 Subject: score: update files according to review comments modified: arch/score/include/asm/cacheflush.h modified: arch/score/include/asm/delay.h modified: arch/score/include/asm/errno.h modified: arch/score/include/asm/pgtable-bits.h modified: arch/score/include/asm/pgtable.h modified: arch/score/include/asm/ptrace.h modified: arch/score/include/asm/unistd.h modified: arch/score/kernel/entry.S modified: arch/score/kernel/process.c modified: arch/score/kernel/ptrace.c modified: arch/score/kernel/signal.c modified: arch/score/kernel/sys_score.c modified: arch/score/kernel/traps.c modified: arch/score/mm/cache.c Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/include/asm/cacheflush.h | 18 ++--- arch/score/include/asm/delay.h | 11 ++- arch/score/include/asm/errno.h | 1 - arch/score/include/asm/pgtable-bits.h | 2 + arch/score/include/asm/pgtable.h | 59 ++++++++------ arch/score/include/asm/ptrace.h | 18 +++-- arch/score/include/asm/unistd.h | 3 +- arch/score/kernel/entry.S | 6 +- arch/score/kernel/process.c | 2 +- arch/score/kernel/ptrace.c | 144 +++++++++------------------------- arch/score/kernel/signal.c | 6 +- arch/score/kernel/sys_score.c | 7 +- arch/score/kernel/traps.c | 2 +- arch/score/mm/cache.c | 125 +++++++++-------------------- 14 files changed, 154 insertions(+), 250 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h index 1c74628..07cc8fc 100644 --- a/arch/score/include/asm/cacheflush.h +++ b/arch/score/include/asm/cacheflush.h @@ -4,18 +4,16 @@ /* Keep includes the same across arches. 
*/ #include -extern void (*flush_cache_all)(void); -extern void (*flush_cache_mm)(struct mm_struct *mm); -extern void (*flush_cache_range)(struct vm_area_struct *vma, +extern void flush_cache_all(void); +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void (*flush_cache_page)(struct vm_area_struct *vma, +extern void flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); -extern void (*flush_cache_sigtramp)(unsigned long addr); -extern void (*flush_icache_all)(void); -extern void (*flush_icache_range)(unsigned long start, unsigned long end); -extern void (*flush_data_cache_page)(unsigned long addr); - -extern void s7_flush_cache_all(void); +extern void flush_cache_sigtramp(unsigned long addr); +extern void flush_icache_all(void); +extern void flush_icache_range(unsigned long start, unsigned long end); +extern void flush_dcache_range(unsigned long start, unsigned long end); #define flush_cache_dup_mm(mm) do {} while (0) #define flush_dcache_page(page) do {} while (0) diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h index ad716f6..6726ec1 100644 --- a/arch/score/include/asm/delay.h +++ b/arch/score/include/asm/delay.h @@ -3,17 +3,22 @@ static inline void __delay(unsigned long loops) { + /* 3 cycles per loop. */ __asm__ __volatile__ ( - "1:\tsubi\t%0,1\n\t" + "1:\tsubi\t%0, 3\n\t" "cmpz.c\t%0\n\t" - "bne\t1b\n\t" + "ble\t1b\n\t" : "=r" (loops) : "0" (loops)); } static inline void __udelay(unsigned long usecs) { - __delay(usecs); + unsigned long loops_per_usec; + + loops_per_usec = (loops_per_jiffy * HZ) / 1000000; + + __delay(usecs * loops_per_usec); } #define udelay(usecs) __udelay(usecs) diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h index 7cd3e1f..29ff39d 100644 --- a/arch/score/include/asm/errno.h +++ b/arch/score/include/asm/errno.h @@ -2,6 +2,5 @@ #define _ASM_SCORE_ERRNO_H #include -#define EMAXERRNO 1024 #endif /* _ASM_SCORE_ERRNO_H */ diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h index ca16d35..7d65a96 100644 --- a/arch/score/include/asm/pgtable-bits.h +++ b/arch/score/include/asm/pgtable-bits.h @@ -17,6 +17,8 @@ #define _CACHE_MASK (1<<3) #define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. 
*/ +#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) #define _PAGE_CHG_MASK \ (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE) diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0f7177a..5e913e5 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -106,24 +106,6 @@ static inline void pmd_clear(pmd_t *pmdp) ((swp_entry_t) { pte_val(pte)}) #define __swp_entry_to_pte(x) ((pte_t) {(x).val}) -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) static inline pte_t pte_mkspecial(pte_t pte) { return pte; } @@ -136,10 +118,15 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -#define pgd_present(pgd) (1) /* pages are always present on non MMU */ +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +#define pgd_present(pgd) (1) #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) -#define pgd_clear(pgdp) +#define pgd_clear(pgdp) do { } while (0) #define kern_addr_valid(addr) (1) #define pmd_offset(a, b) ((void *) 0) @@ -150,11 +137,33 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define pud_offset(pgd, address) ((pud_t *) pgd) -#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_CACHE) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_CACHE) +#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ + __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED #define pgprot_noncached(x) (x) diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h index 1a4900a..66b14c8 100644 --- a/arch/score/include/asm/ptrace.h +++ b/arch/score/include/asm/ptrace.h @@ -1,6 +1,9 @@ #ifndef _ASM_SCORE_PTRACE_H #define 
_ASM_SCORE_PTRACE_H +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + #define PC 32 #define CONDITION 33 #define ECR 34 @@ -76,12 +79,17 @@ struct pt_regs { */ #define user_mode(regs) ((regs->cp0_psr & 8) == 8) -#define instruction_pointer(regs) (0) -#define profile_pc(regs) instruction_pointer(regs) +#define instruction_pointer(regs) ((unsigned long)(regs)->cp0_epc) +#define profile_pc(regs) instruction_pointer(regs) -extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern void do_syscall_trace(struct pt_regs *regs, int entryexit); extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *); -extern void clear_single_step(struct task_struct *); -#endif +extern int read_tsk_short(struct task_struct *, unsigned long, + unsigned short *); + +#define arch_has_single_step() (1) +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); +#endif /* __KERNEL__ */ #endif /* _ASM_SCORE_PTRACE_H */ diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h index 9aa3a15..f0f84de 100644 --- a/arch/score/include/asm/unistd.h +++ b/arch/score/include/asm/unistd.h @@ -1,7 +1,8 @@ -#ifndef _ASM_SCORE_UNISTD_H +#if !defined(_ASM_SCORE_UNISTD_H) || defined(__SYSCALL) #define _ASM_SCORE_UNISTD_H #define __ARCH_HAVE_MMU +#define __ARCH_WANT_IPC_PARSE_VERSION #include diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 6c6b7ea..0af89b2 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -23,7 +23,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#include +#include #include #include @@ -434,7 +434,7 @@ stack_done: sw r8, [r0, PT_R7] b 2f 1: - cmpi.c r4, -EMAXERRNO-1 # -EMAXERRNO - 1=-1134 + cmpi.c r4, -MAX_ERRNO - 1 ble 2f ldi r8, 0x1; sw r8, [r0, PT_R7] @@ -466,7 +466,7 @@ syscall_trace_entry: lw r7, [r0, PT_R7] brl r8 - li r8, -EMAXERRNO - 1 # error? + li r8, -MAX_ERRNO - 1 sw r8, [r0, PT_R7] # set error flag neg r4, r4 # error diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index aaa3085..d93966f 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -56,7 +56,7 @@ void __noreturn cpu_idle(void) } } -asmlinkage void ret_from_fork(void); +void ret_from_fork(void); void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 8fe7209..19911e3 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c @@ -80,7 +80,35 @@ write_tsk_long(struct task_struct *child, return copied != sizeof(val) ? -EIO : 0; } -void set_single_step(struct task_struct *child) +/* + * Get all user integer registers. + */ +static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) +{ + struct pt_regs *regs = task_pt_regs(tsk); + + return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; +} + +/* + * Set all user integer registers. 
+ */ +static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) +{ + struct pt_regs newregs; + int ret; + + ret = -EFAULT; + if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { + struct pt_regs *regs = task_pt_regs(tsk); + *regs = newregs; + ret = 0; + } + + return ret; +} + +void user_enable_single_step(struct task_struct *child) { /* far_epc is the target of branch */ unsigned int epc, far_epc = 0; @@ -201,7 +229,7 @@ void set_single_step(struct task_struct *child) } } -void clear_single_step(struct task_struct *child) +void user_disable_single_step(struct task_struct *child) { if (child->thread.insn1_type == 0) write_tsk_short(child, child->thread.addr1, @@ -230,54 +258,17 @@ void clear_single_step(struct task_struct *child) child->thread.ss_nextcnt = 0; } - -void ptrace_disable(struct task_struct *child) {} +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; - if (request == PTRACE_TRACEME) { - /* are we already being traced? */ - if (current->ptrace & PT_PTRACED) - return -EPERM; - - /* set the ptrace bit in the process flags. */ - current->ptrace |= PT_PTRACED; - return 0; - } - - ret = -ESRCH; - if (!child) - return ret; - - ret = -EPERM; - - if (request == PTRACE_ATTACH) { - ret = ptrace_attach(child); - return ret; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - return ret; - switch (request) { - case PTRACE_PEEKTEXT: /* read word at location addr. */ - case PTRACE_PEEKDATA: { - unsigned long tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - - ret = put_user(tmp, (unsigned long *) data); - return ret; - } - /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; @@ -329,15 +320,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) return ret; } - case PTRACE_POKETEXT: /* write the word at location addr. */ - case PTRACE_POKEDATA: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) - == sizeof(data)) - break; - ret = -EIO; - return ret; - case PTRACE_POKEUSR: { struct pt_regs *regs; ret = 0; @@ -372,64 +354,16 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - case PTRACE_SYSCALL: /* continue and stop at next - (return from) syscall. */ - case PTRACE_CONT: { /* restart after signal. */ - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - - child->exit_code = data; - wake_up_process(child); - ret = 0; - break; - } - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - ret = 0; - if (child->state == EXIT_ZOMBIE) /* already dead. */ - break; - child->exit_code = SIGKILL; - clear_single_step(child); - wake_up_process(child); + case PTRACE_GETREGS: + ret = ptrace_getregs(child, (void __user *)data); break; - case PTRACE_SINGLESTEP: { /* set the trap flag. */ - ret = -EIO; - if ((unsigned long) data > _NSIG) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - set_single_step(child); - child->exit_code = data; - /* give it a chance to run. 
*/ - wake_up_process(child); - ret = 0; - break; - } - - case PTRACE_DETACH: /* detach a process that was attached. */ - ret = ptrace_detach(child, data); - break; - - case PTRACE_SETOPTIONS: - if (data & PTRACE_O_TRACESYSGOOD) - child->ptrace |= PT_TRACESYSGOOD; - else - child->ptrace &= ~PT_TRACESYSGOOD; - ret = 0; + case PTRACE_SETREGS: + ret = ptrace_setregs(child, (void __user *)data); break; default: - ret = -EIO; + ret = ptrace_request(child, request, addr, data); break; } diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index b4ed1b3..5c00408 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -132,7 +132,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return (void *)((sp - frame_size) & ~7); } -asmlinkage int score_sigaltstack(struct pt_regs *regs) +int score_sigaltstack(struct pt_regs *regs) { const stack_t *uss = (const stack_t *) regs->regs[4]; stack_t *uoss = (stack_t *) regs->regs[5]; @@ -141,7 +141,7 @@ asmlinkage int score_sigaltstack(struct pt_regs *regs) return do_sigaltstack(uss, uoss, usp); } -asmlinkage void score_rt_sigreturn(struct pt_regs *regs) +void score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; @@ -276,7 +276,7 @@ int handle_signal(unsigned long sig, siginfo_t *info, return ret; } -asmlinkage void do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 6a60d1e..68655f4 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -64,8 +64,7 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, * Clone a task - this clones the calling program thread. * This is called indirectly via a small wrapper */ -asmlinkage int -score_clone(struct pt_regs *regs) +int score_clone(struct pt_regs *regs) { unsigned long clone_flags; unsigned long newsp; @@ -93,7 +92,7 @@ score_clone(struct pt_regs *regs) * sys_execve() executes a new program. * This is called indirectly via a small wrapper */ -asmlinkage int score_execve(struct pt_regs *regs) +int score_execve(struct pt_regs *regs) { int error; char *filename; @@ -114,7 +113,7 @@ asmlinkage int score_execve(struct pt_regs *regs) * If we ever come here the user sp is bad. Zap the process right away. * Due to the bad stack signaling wouldn't work. */ -asmlinkage void bad_stack(void) +void bad_stack(void) { do_exit(SIGSEGV); } diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index 957ae9e..0e46fb1 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -252,7 +252,7 @@ asmlinkage void do_ri(struct pt_regs *regs) if (current->thread.single_step == 1) { if ((epc == current->thread.addr1) || (epc == current->thread.addr2)) { - clear_single_step(current); + user_disable_single_step(current); force_sig(SIGTRAP, current); return; } else diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c index 1ebc67f..dbac9d9 100644 --- a/arch/score/mm/cache.c +++ b/arch/score/mm/cache.c @@ -32,34 +32,26 @@ #include -/* Cache operations. 
*/ -void (*flush_cache_all)(void); -void (*__flush_cache_all)(void); -void (*flush_cache_mm)(struct mm_struct *mm); -void (*flush_cache_range)(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -void (*flush_cache_page)(struct vm_area_struct *vma, - unsigned long page, unsigned long pfn); -void (*flush_icache_range)(unsigned long start, unsigned long end); -void (*__flush_cache_vmap)(void); -void (*__flush_cache_vunmap)(void); -void (*flush_cache_sigtramp)(unsigned long addr); -void (*flush_data_cache_page)(unsigned long addr); -EXPORT_SYMBOL(flush_data_cache_page); -void (*flush_icache_all)(void); - -/*Score 7 cache operations*/ -static inline void s7___flush_cache_all(void); -static void s7_flush_cache_mm(struct mm_struct *mm); -static void s7_flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -static void s7_flush_cache_page(struct vm_area_struct *vma, - unsigned long page, unsigned long pfn); -static void s7_flush_icache_range(unsigned long start, unsigned long end); -static void s7_flush_cache_sigtramp(unsigned long addr); -static void s7_flush_data_cache_page(unsigned long addr); -static void s7_flush_dcache_range(unsigned long start, unsigned long end); +/* +Just flush entire Dcache!! +You must ensure the page doesn't include instructions, because +the function will not flush the Icache. +The addr must be cache aligned. +*/ +static void flush_data_cache_page(unsigned long addr) +{ + unsigned int i; + for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "cache 0x1a, [%0, 0]\n" + "nop\n" + : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} +/* called by update_mmu_cache. */ void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { @@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address, if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) { addr = (unsigned long) page_address(page); if (exec) - s7_flush_data_cache_page(addr); + flush_data_cache_page(addr); clear_bit(PG_arch_1, &page->flags); } } @@ -101,44 +93,22 @@ static inline void setup_protection_map(void) void __devinit cpu_cache_init(void) { - flush_cache_all = s7_flush_cache_all; - __flush_cache_all = s7___flush_cache_all; - flush_cache_mm = s7_flush_cache_mm; - flush_cache_range = s7_flush_cache_range; - flush_cache_page = s7_flush_cache_page; - flush_icache_range = s7_flush_icache_range; - flush_cache_sigtramp = s7_flush_cache_sigtramp; - flush_data_cache_page = s7_flush_data_cache_page; - setup_protection_map(); } -void s7_flush_icache_all(void) +void flush_icache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_icache_all\n" + "la r8, flush_icache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" : : : "r8"); } -void s7_flush_dcache_all(void) -{ - __asm__ __volatile__( - "la r8, s7_flush_dcache_all\n" - "cache 0x1f, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" - "cache 0x1a, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" - : : : "r8"); -} - -void s7_flush_cache_all(void) +void flush_dcache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_cache_all\n" - "cache 0x10, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" + "la r8, flush_dcache_all\n" "cache 0x1f, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [r8, 0]\n" @@ -146,10 +116,10 @@ void s7_flush_cache_all(void) : : : "r8"); } -void s7___flush_cache_all(void) +void flush_cache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_cache_all\n" + "la r8, 
flush_cache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1f, [r8, 0]\n" @@ -159,11 +129,11 @@ void s7___flush_cache_all(void) : : : "r8"); } -static void s7_flush_cache_mm(struct mm_struct *mm) +void flush_cache_mm(struct mm_struct *mm) { if (!(mm->context)) return; - s7_flush_cache_all(); + flush_cache_all(); } /*if we flush a range precisely , the processing may be very long. @@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find a suitably efficient method for removing multiple page sized regions from the cache. */ -static void -s7_flush_cache_range(struct vm_area_struct *vma, +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; @@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma, tmpend = (start | (PAGE_SIZE-1)) > end ? end : (start | (PAGE_SIZE-1)); - s7_flush_dcache_range(start, tmpend); + flush_dcache_range(start, tmpend); if (exec) - s7_flush_icache_range(start, tmpend); + flush_icache_range(start, tmpend); start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); } } -static void -s7_flush_cache_page(struct vm_area_struct *vma, +void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int exec = vma->vm_flags & VM_EXEC; unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); - s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE); + flush_dcache_range(kaddr, kaddr + PAGE_SIZE); if (exec) - s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE); + flush_icache_range(kaddr, kaddr + PAGE_SIZE); } -static void s7_flush_cache_sigtramp(unsigned long addr) +void flush_cache_sigtramp(unsigned long addr) { __asm__ __volatile__( "cache 0x02, [%0, 0]\n" @@ -248,30 +216,11 @@ static void s7_flush_cache_sigtramp(unsigned long addr) } /* -Just flush entire Dcache!! -You must ensure the page doesn't include instructions, because -the function will not flush the Icache. -The addr must be cache aligned. -*/ -static void s7_flush_data_cache_page(unsigned long addr) -{ - unsigned int i; - for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { - __asm__ __volatile__( - "cache 0x0e, [%0, 0]\n" - "cache 0x1a, [%0, 0]\n" - "nop\n" - : : "r" (addr)); - addr += L1_CACHE_BYTES; - } -} - -/* 1. WB and invalid a cache line of Dcache 2. Drain Write Buffer the range must be smaller than PAGE_SIZE */ -static void s7_flush_dcache_range(unsigned long start, unsigned long end) +void flush_dcache_range(unsigned long start, unsigned long end) { int size, i; @@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end) } } -static void s7_flush_icache_range(unsigned long start, unsigned long end) +void flush_icache_range(unsigned long start, unsigned long end) { int size, i; start = start & ~(L1_CACHE_BYTES - 1); -- cgit v1.1 From 72ea3723411c18cace4c8c9e0ccf4116d5e6eaaa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:11:55 +0200 Subject: score: unset __ARCH_WANT_IPC_PARSE_VERSION This really should not be needed. The change for not changing the IPC code for every new architecture just went into 2.6.31, so we can skip it now. 
Signed-off-by: Arnd Bergmann --- arch/score/include/asm/unistd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h index f0f84de..0d98b72 100644 --- a/arch/score/include/asm/unistd.h +++ b/arch/score/include/asm/unistd.h @@ -2,7 +2,6 @@ #define _ASM_SCORE_UNISTD_H #define __ARCH_HAVE_MMU -#define __ARCH_WANT_IPC_PARSE_VERSION #include -- cgit v1.1 From 78229db4c4f78f27ecf772fe7489a70530ba9862 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:22:30 +0200 Subject: score: remove __{put,get}_user_unknown The point of these extern declarations is to provoke a link error, so an architecture must not provide a symbol for them. Signed-off-by: Arnd Bergmann --- arch/score/lib/string.S | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch') diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S index 943d091..00b7d3a 100644 --- a/arch/score/lib/string.S +++ b/arch/score/lib/string.S @@ -182,15 +182,3 @@ ENTRY(__clear_user) 99: .word 0b, 99b .previous - - .align 2 -ENTRY(__put_user_unknown) - .set volatile - ldi r4, -EFAULT - br r3 - - .align 2 -ENTRY(__get_user_unknown) - ldi r5, 0 - ldi r4, -EFAULT - br r3 -- cgit v1.1 From f673c032ed13ed8f3fda5922c2190da2892398bc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:31:54 +0200 Subject: score: add generic sys_call_table This adds back a sys_call_table to the score architecture, which got lost in the conversion to the generic unistd.h file. It's rather worrying that the code got submitted without a system call table, which evidently means that it got zero testing. Since the system call table has a different layout from the old one (which was modeled after the mips-o32 one), I also try to fix the entry.S path to use it. In the modified calling conventions, all system call arguments are passed as registers r4 through r9, instead of r4 through r7 plus stack for the fifth and sixth argument. This matches what other architectures do when they normally pass arguments on the stack.
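[ Note: the generic table relies on redefining __SYSCALL() so that every entry listed in the generic unistd header expands into a designated array initializer; with single-pointer entries the syscall number indexes the table directly, which is why entry.S now scales the number by 4 (slli r8, r27, 2) instead of 8. A minimal sketch of that pattern follows; the header names are assumptions based on the usual asm-generic layout rather than a verbatim copy of the new file. ]

#include <linux/syscalls.h>	/* sys_* prototypes (assumed header set) */
#include <linux/unistd.h>	/* pulls in __NR_syscalls */

#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

/* Each __SYSCALL(nr, sys_foo) line in the header fills slot nr with sys_foo. */
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};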
Signed-off-by: Arnd Bergmann --- arch/score/kernel/Makefile | 3 ++- arch/score/kernel/entry.S | 47 +++----------------------------------- arch/score/kernel/sys_call_table.c | 12 ++++++++++ arch/score/kernel/sys_score.c | 9 +------- 4 files changed, 18 insertions(+), 53 deletions(-) create mode 100644 arch/score/kernel/sys_call_table.c (limited to 'arch') diff --git a/arch/score/kernel/Makefile b/arch/score/kernel/Makefile index 1e5de89..f218673 100644 --- a/arch/score/kernel/Makefile +++ b/arch/score/kernel/Makefile @@ -5,6 +5,7 @@ extra-y := head.o vmlinux.lds obj-y += entry.o init_task.o irq.o process.o ptrace.o \ - setup.o signal.o sys_score.o time.o traps.o + setup.o signal.o sys_score.o time.o traps.o \ + sys_call_table.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 0af89b2..2f16917 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -400,6 +400,8 @@ ENTRY(handle_sys) sw r4, [r0, PT_ORIG_R4] #for restart syscall sw r7, [r0, PT_ORIG_R7] #for restart syscall sw r27, [r0, PT_IS_SYSCALL] # it from syscall + sw r8, [r0, 16] # argument 5 from user r8 + sw r9, [r0, 20] # argument 6 from user r9 lw r9, [r0, PT_EPC] # skip syscall on return addi r9, 4 @@ -408,19 +410,14 @@ ENTRY(handle_sys) cmpi.c r27, __NR_syscalls # check syscall number bgtu illegal_syscall - slli r8, r27, 3 # get syscall routine + slli r8, r27, 2 # get syscall routine la r11, sys_call_table add r11, r11, r8 lw r10, [r11] # get syscall entry - lw r11, [r11, 4] # get number of args cmpz.c r10 beq illegal_syscall - cmpi.c r11, 4 # more than 4 arguments? - bgtu stackargs - -stack_done: lw r8, [r28, TI_FLAGS] li r9, _TIF_SYSCALL_TRACE and.c r8, r8, r9 @@ -475,44 +472,6 @@ syscall_trace_entry: 1: sw r4, [r0, PT_R2] # result j syscall_exit -stackargs: - lw r8, [r0, PT_R0] - andri.c r9, r8, 3 # test whether user sp is align a word - bne bad_stack - subi r11, 5 - slli r9, r11, 2 - add.c r9, r9, r8 - - bmi bad_stack - la r9, 3f # calculate branch address - slli r11, r11, 3 - sub r9, r9, r11 - br r9 - -2: lw r9, [r8, 20] # argument 6 from usp - sw r9, [r0, 20] - -3: lw r9, [r8, 16] # argument 5 from usp - sw r9, [r0, 16] - j stack_done - - .section __ex_table,"a" - .word 2b, bad_stack - .word 3b, bad_stack - .previous - - /* - * The stackpointer for a call with more than 4 arguments is bad. - * We probably should handle this case a bit more drastic. 
- */ -bad_stack: - neg r27, r27 # error - sw r27, [r0, PT_ORIG_R4] - sw r27, [r0, PT_R4] - ldi r8, 1 # set error flag - sw r8, [r0, PT_R7] - j syscall_return - illegal_syscall: ldi r4, -ENOSYS # error sw r4, [r0, PT_ORIG_R4] diff --git a/arch/score/kernel/sys_call_table.c b/arch/score/kernel/sys_call_table.c new file mode 100644 index 0000000..287369b --- /dev/null +++ b/arch/score/kernel/sys_call_table.c @@ -0,0 +1,12 @@ +#include +#include +#include + +#include + +#undef __SYSCALL +#define __SYSCALL(nr, call) [nr] = (call), + +void *sys_call_table[__NR_syscalls] = { +#include +}; diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 68655f4..3318861 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -75,14 +75,7 @@ int score_clone(struct pt_regs *regs) if (!newsp) newsp = regs->regs[0]; parent_tidptr = (int __user *)regs->regs[6]; - - child_tidptr = NULL; - if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { - int __user *__user *usp = (int __user *__user *)regs->regs[0]; - - if (get_user(child_tidptr, &usp[4])) - return -EFAULT; - } + child_tidptr = (int __user *)regs->regs[8]; return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); -- cgit v1.1 From b5022df4c275607f0824526eceb3c217e85279f3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:45:18 +0200 Subject: score: remove init_mm init_mm is now part of the common code and not provided by the architecture any more. Signed-off-by: Arnd Bergmann --- arch/score/kernel/init_task.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/init_task.c b/arch/score/kernel/init_task.c index 9eecde0..54d9552 100644 --- a/arch/score/kernel/init_task.c +++ b/arch/score/kernel/init_task.c @@ -26,8 +26,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); /* * Initial thread structure. -- cgit v1.1 From 2f476ef61f0e00fe3fcb96693b6a624a6c52fad9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 13:40:41 +0200 Subject: score: add prototypes for wrapped syscalls Every system call should be declared, so this adds missing declarations for the ones we were missing so far. Signed-off-by: Arnd Bergmann --- arch/score/include/asm/syscalls.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/syscalls.h b/arch/score/include/asm/syscalls.h index 00c28e0..1dd5e0d 100644 --- a/arch/score/include/asm/syscalls.h +++ b/arch/score/include/asm/syscalls.h @@ -1,8 +1,10 @@ #ifndef _ASM_SCORE_SYSCALLS_H #define _ASM_SCORE_SYSCALLS_H -asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); -#define sys_clone sys_clone +asmlinkage long score_clone(struct pt_regs *regs); +asmlinkage long score_execve(struct pt_regs *regs); +asmlinkage long score_sigaltstack(struct pt_regs *regs); +asmlinkage long score_rt_sigreturn(struct pt_regs *regs); #include -- cgit v1.1 From e487683990972bf9aa4e688434c46ead76748bca Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 20 Jun 2009 23:27:16 -0700 Subject: x86, mce: fix typo in comment in asm/mce.h Fix comment to match the actual declaration. Signed-off-by: Borislav Petkov Cc: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/mce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 5cdd8d1..b50b9e9 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -9,7 +9,7 @@ */ #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ -#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ +#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ -- cgit v1.1 From a95436e44a76a32dcbe7c8df59701ddde53017c1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 20 Jun 2009 23:28:22 -0700 Subject: x86, mce: use atomic_inc_return() instead of add by 1 Use atomic_inc_return() instead of atomic_add_return() by 1. Signed-off-by: Borislav Petkov Cc: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 284d1de..7da8fec 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -242,7 +242,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) /* * Make sure only one CPU runs in machine check panic */ - if (atomic_add_return(1, &mce_paniced) > 1) + if (atomic_inc_return(&mce_paniced) > 1) wait_for_panic(); barrier(); @@ -705,7 +705,7 @@ static int mce_start(int *no_way_out) * global_nwo should be updated before mce_callin */ smp_wmb(); - order = atomic_add_return(1, &mce_callin); + order = atomic_inc_return(&mce_callin); /* * Wait for everyone. -- cgit v1.1 From e831a9c6186ca1f63fdf2f41628193dd690ab440 Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Mon, 22 Jun 2009 17:10:57 +0800 Subject: score: move save arg5 and arg6 instruction in front of enable_irq Because enable_irq clobber r8 before arg5 was saved. modified: arch/score/kernel/entry.S Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 2f16917..10e859d 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -395,13 +395,13 @@ ENTRY(resume) ENTRY(handle_sys) SAVE_ALL + sw r8, [r0, 16] # argument 5 from user r8 + sw r9, [r0, 20] # argument 6 from user r9 enable_irq sw r4, [r0, PT_ORIG_R4] #for restart syscall sw r7, [r0, PT_ORIG_R7] #for restart syscall sw r27, [r0, PT_IS_SYSCALL] # it from syscall - sw r8, [r0, 16] # argument 5 from user r8 - sw r9, [r0, 20] # argument 6 from user r9 lw r9, [r0, PT_EPC] # skip syscall on return addi r9, 4 -- cgit v1.1 From e74e396204bfcb67570ba4517b08f5918e69afea Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 30 Mar 2009 19:07:44 +0900 Subject: percpu: use dynamic percpu allocator as the default percpu allocator This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use dynamic percpu allocator. The first chunk is allocated using embedding helper and 8k is reserved for modules. This ensures that the new allocator behaves almost identically to the original allocator as long as static percpu variables are concerned, so it shouldn't introduce much breakage. s390 and alpha use custom SHIFT_PERCPU_PTR() to work around addressing range limit the addressing model imposes. 
Unfortunately, this breaks if the address is specified using a variable, so for now, the two archs aren't converted. The following architectures are affected by this change. * sh * arm * cris * mips * sparc(32) * blackfin * avr32 * parisc (broken, under investigation) * m32r * powerpc(32) As this change makes the dynamic allocator the default one, CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its invert - CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to yet-to-be converted archs. These archs implement their own setup_per_cpu_areas() and the conversion is not trivial. * powerpc(64) * sparc(64) * ia64 * alpha * s390 Boot and batch alloc/free tests on x86_32 with debug code (x86_32 doesn't use default first chunk initialization). Compile tested on sparc(32), powerpc(32), arm and alpha. Kyle McMartin reported that this change breaks parisc. The problem is still under investigation and he is okay with pushing this patch forward and fixing parisc later. [ Impact: use dynamic allocator for most archs w/o custom percpu setup ] Signed-off-by: Tejun Heo Acked-by: Rusty Russell Acked-by: David S. Miller Acked-by: Benjamin Herrenschmidt Acked-by: Martin Schwidefsky Reviewed-by: Christoph Lameter Cc: Paul Mundt Cc: Russell King Cc: Mikael Starvik Cc: Ralf Baechle Cc: Bryan Wu Cc: Kyle McMartin Cc: Matthew Wilcox Cc: Grant Grundler Cc: Hirokazu Takata Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Heiko Carstens Cc: Ingo Molnar --- arch/alpha/Kconfig | 3 +++ arch/ia64/Kconfig | 3 +++ arch/powerpc/Kconfig | 3 +++ arch/s390/Kconfig | 3 +++ arch/sparc/Kconfig | 3 +++ arch/x86/Kconfig | 3 --- 6 files changed, 15 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 9fb8aae..05d8640 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -70,6 +70,9 @@ config AUTO_IRQ_AFFINITY depends on SMP default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 170042b..328d2f8b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + config HAVE_SETUP_PER_CPU_AREA def_bool y diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bf6cedf..a774c2a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -46,6 +46,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool PPC64 + config HAVE_SETUP_PER_CPU_AREA def_bool PPC64 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index a14dba0..f4a3cc6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,6 +75,9 @@ config VIRT_CPU_ACCOUNTING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + mainmenu "Linux Kernel Configuration" config S390 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3f8b6a9..7a8698b 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -92,6 +92,9 @@ config AUDIT_ARCH bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y if SPARC64 + config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d1430ef..a48a900 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -149,9 +149,6 @@ config ARCH_HAS_CACHE_LINE_SIZE config HAVE_SETUP_PER_CPU_AREA def_bool y -config HAVE_DYNAMIC_PER_CPU_AREA - def_bool y - config HAVE_CPUMASK_OF_CPU_MAP def_bool X86_64_SMP -- cgit v1.1 From 
405d967dc70002991f8fc35c20e0d3cbc7614f63 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:38 +0900 Subject: linker script: throw away .discard section x86 throws away .discard section but no other archs do. Also, .discard is not thrown away while linking modules. Make every arch and module linking throw it away. This will be used to define dummy variables for percpu declarations and definitions. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: always throw away everything in .discard ] Signed-off-by: Tejun Heo Cc: Ivan Kokshaysky Cc: Richard Henderson Cc: Russell King Cc: Haavard Skinnemoen Cc: Bryan Wu Cc: Mikael Starvik Cc: Jesper Nilsson Cc: David Howells Cc: Yoshinori Sato Cc: Tony Luck Cc: Hirokazu Takata Cc: Geert Uytterhoeven Cc: Michal Simek Cc: Ralf Baechle Cc: Kyle McMartin Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: David S. Miller Cc: Jeff Dike Cc: Chris Zankel Cc: Rusty Russell Cc: Ingo Molnar --- arch/alpha/kernel/vmlinux.lds.S | 1 + arch/arm/kernel/vmlinux.lds.S | 1 + arch/avr32/kernel/vmlinux.lds.S | 1 + arch/blackfin/kernel/vmlinux.lds.S | 1 + arch/cris/kernel/vmlinux.lds.S | 1 + arch/frv/kernel/vmlinux.lds.S | 2 ++ arch/h8300/kernel/vmlinux.lds.S | 1 + arch/ia64/kernel/vmlinux.lds.S | 1 + arch/m32r/kernel/vmlinux.lds.S | 1 + arch/m68k/kernel/vmlinux-std.lds | 1 + arch/m68k/kernel/vmlinux-sun3.lds | 1 + arch/m68knommu/kernel/vmlinux.lds.S | 1 + arch/microblaze/kernel/vmlinux.lds.S | 2 ++ arch/mips/kernel/vmlinux.lds.S | 1 + arch/mn10300/kernel/vmlinux.lds.S | 1 + arch/parisc/kernel/vmlinux.lds.S | 1 + arch/powerpc/kernel/vmlinux.lds.S | 1 + arch/s390/kernel/vmlinux.lds.S | 1 + arch/sh/kernel/vmlinux.lds.S | 1 + arch/sparc/kernel/vmlinux.lds.S | 1 + arch/um/kernel/dyn.lds.S | 2 ++ arch/um/kernel/uml.lds.S | 2 ++ arch/xtensa/kernel/vmlinux.lds.S | 1 + 23 files changed, 27 insertions(+) (limited to 'arch') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index b9d6568..75fe1d6 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -139,6 +139,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .mdebug 0 : { diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 6c07797..e256c57 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -82,6 +82,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) *(.ARM.exidx.exit.text) *(.ARM.extab.exit.text) #ifndef CONFIG_MMU diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 7910d41..b832460 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -131,6 +131,7 @@ SECTIONS /DISCARD/ : { EXIT_DATA *(.exitcall.exit) + *(.discard) } DWARF_DEBUG diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 6ac307c..6e8eabd 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -280,5 +280,6 @@ SECTIONS /DISCARD/ : { *(.exitcall.exit) + *(.discard) } } diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 0d2adfc..a3175eb 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -145,6 +145,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024; diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index 
22d9787..64b5a5e 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -177,6 +177,8 @@ SECTIONS .debug_ranges 0 : { *(.debug_ranges) } .comment 0 : { *(.comment) } + + /DISCARD/ : { *(.discard) } } __kernel_image_size_no_bss = __bss_start - __kernel_image_start; diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 43a87b9..03d6c0d 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -154,6 +154,7 @@ SECTIONS } /DISCARD/ : { *(.exitcall.exit) + *(.discard) } .romfs : { diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 4a95e86..13d9589 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -29,6 +29,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) } diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 4179adf..480a499 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -125,6 +125,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Stabs debugging sections. */ diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 01d212b..905a797 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -87,6 +87,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Stabs debugging sections. */ diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index c192f77..47d04be 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -82,6 +82,7 @@ __init_begin = .; EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .crap : { diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index b7fe505..68111a6 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -188,6 +188,7 @@ SECTIONS { EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .bss : { diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index d34d38d..a207543 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -162,4 +162,6 @@ SECTIONS { } . 
= ALIGN(4096); _end = .; + + /DISCARD/ : { *(.discard) } } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 58738c8..4590160 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -179,6 +179,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) /* ABI crap starts here */ *(.MIPS.options) diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index 24de6b9..5d9f2f9 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -146,6 +146,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index fd2cc4f..ccf5834 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -240,6 +240,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) #ifdef CONFIG_64BIT /* temporary hack until binutils is fixed to not emit these * for static binaries diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8ef8a14..7fca935 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -40,6 +40,7 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) EXIT_DATA } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23..98867df 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -161,6 +161,7 @@ SECTIONS /DISCARD/ : { EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Debugging sections. */ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index f53c76a..766976d 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -171,6 +171,7 @@ SECTIONS */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index fcbbd00..d63cf91 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -175,6 +175,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 9975e1a..2916d6e 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -156,4 +156,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + + /DISCARD/ : { *(.discard) } } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 11b8352..1f8a622 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -100,4 +100,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + + /DISCARD/ : { *(.discard) } } diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 41c159c..b1e2463 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -287,6 +287,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .xt.lit : { *(.xt.lit) } -- cgit v1.1 From fe87f94f341a4b4097285b46f003059b26eb59bf Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Wed, 24 Jun 2009 15:13:41 +0900 Subject: CRIS: Change DEFINE_PER_CPU of current_pgd to be non volatile. The DEFINE_PER_CPU of current_pgd was on CRIS defined using volatile, which is not needed. Remove volatile. Tested on an ARTPEC-3 (CRISv32) board. 
tj: extern DEFINE_PER_CPU() replaced with DECLARE_PER_CPU() [ Impact: code cleanup ] Signed-off-by: Jesper Nilsson Signed-off-by: Tejun Heo --- arch/cris/include/asm/mmu_context.h | 3 ++- arch/cris/mm/fault.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h index 72ba08d..1d45fd6 100644 --- a/arch/cris/include/asm/mmu_context.h +++ b/arch/cris/include/asm/mmu_context.h @@ -17,7 +17,8 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, * registers like cr3 on the i386 */ -extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */ +/* defined in arch/cris/mm/fault.c */ +DECLARE_PER_CPU(pgd_t *, current_pgd); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index f925115..4a7cdd9 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -29,7 +29,7 @@ extern void die_if_kernel(const char *, struct pt_regs *, long); /* current active page directory */ -volatile DEFINE_PER_CPU(pgd_t *,current_pgd); +DEFINE_PER_CPU(pgd_t *, current_pgd); unsigned long cris_signal_return_page; /* -- cgit v1.1 From 204fba4aa303ea4a7bb726a539bf4a5b9e3203d0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:45 +0900 Subject: percpu: cleanup percpu array definitions Currently, the following three different ways to define percpu arrays are in use. 1. DEFINE_PER_CPU(elem_type[array_len], array_name); 2. DEFINE_PER_CPU(elem_type, array_name[array_len]); 3. DEFINE_PER_CPU(elem_type, array_name)[array_len]; Unify to #1 which correctly separates the roles of the two parameters and thus allows more flexibility in the way percpu variables are defined. [ Impact: cleanup ] Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter Cc: Ingo Molnar Cc: Tony Luck Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Jeremy Fitzhardinge Cc: linux-mm@kvack.org Cc: Christoph Lameter Cc: David S. 
Miller --- arch/ia64/kernel/smp.c | 2 +- arch/ia64/sn/kernel/setup.c | 2 +- arch/powerpc/mm/stab.c | 2 +- arch/powerpc/platforms/ps3/smp.c | 2 +- arch/x86/kernel/cpu/cpu_debug.c | 4 ++-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index f0c521b..94cf78b 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -58,7 +58,7 @@ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; -static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; +static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned; #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index e456f06..ece1bf9 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); -DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 98cd1dc..6e9b69c 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -31,7 +31,7 @@ struct stab_entry { #define NR_STAB_CACHE_ENTRIES 8 static DEFINE_PER_CPU(long, stab_cache_ptr); -static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]); +static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache); /* * Create a segment table entry for the given esid/vsid pair. 
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f6e04bc..51ffde4 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c @@ -37,7 +37,7 @@ */ #define MSG_COUNT 4 -static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]); +static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); static void do_message_pass(int target, int msg) { diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c index 6b2a52d..dca325c 100644 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ b/arch/x86/kernel/cpu/cpu_debug.c @@ -30,8 +30,8 @@ #include #include -static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); -static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); +static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); +static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); static DEFINE_PER_CPU(int, cpu_priv_count); static DEFINE_MUTEX(cpu_debug_lock); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index ddae216..bd2a2fa 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -69,7 +69,7 @@ struct threshold_bank { struct threshold_block *blocks; cpumask_var_t cpus; }; -static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); +static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); #ifdef CONFIG_SMP static unsigned char shared_bank[NR_BANKS] = { diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 76dfef2..4946288 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) x86_pmu_disable_counter(hwc, idx); } -static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); +static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. -- cgit v1.1 From b9bf3121af348d9255f1c917830fe8c2df52efcb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:47 +0900 Subject: percpu: use DEFINE_PER_CPU_SHARED_ALIGNED() There are a few places where ___cacheline_aligned* is used with DEFINE_PER_CPU(). Use DEFINE_PER_CPU_SHARED_ALIGNED() instead. DEFINE_PER_CPU_SHARED_ALIGNED() applies alignment only on SMPs. While all other converted places used _in_smp variant or only get compiled for SMP, net/rds used unconditional ____cacheline_aligned. I don't see any reason these data structures should be aligned on UP and thus converted together. 
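The conversion pattern, shown as a short sketch (the call sites themselves appear in the diff below):

/* before: alignment attribute appended by hand */
static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;

/* after: the macro supplies the alignment, and only for SMP builds */
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);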
Signed-off-by: Tejun Heo Cc: Mike Frysinger Cc: Tony Luck Cc: Andy Grover --- arch/blackfin/mm/sram-alloc.c | 6 +++--- arch/ia64/kernel/smp.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index 0bc3c4e..99e4dbb 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -42,9 +42,9 @@ #include #include "blackfin_sram.h" -static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp; -static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp; -static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp; +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock); +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock); +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock); static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp; /* the data structure for L1 scratchpad and DATA SRAM */ diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 94cf78b..93ebfea 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -58,7 +58,8 @@ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; -static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned; +static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS], + shadow_flush_counts); #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 -- cgit v1.1 From 245b2e70eabd797932adb263a65da0bab3711753 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:48 +0900 Subject: percpu: clean up percpu variable definitions Percpu variable definition is about to be updated such that all percpu symbols including the static ones must be unique. Update percpu variable definitions accordingly. * as,cfq: rename ioc_count uniquely * cpufreq: rename cpu_dbs_info uniquely * xen: move nesting_count out of xen_evtchn_do_upcall() and rename it * mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and rename it * ipv4,6: rename cookie_scratch uniquely * x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to pmc_irq_entry and nmi_entry to pmc_nmi_entry * perf_counter: rename disable_count to perf_disable_count * ftrace: rename test_event_disable to ftrace_test_event_disable * kmemleak: rename test_pointer to kmemleak_test_pointer * mce: rename next_interval to mce_next_interval [ Impact: percpu usage cleanups, no duplicate static percpu var names ] Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter Cc: Ivan Kokshaysky Cc: Jens Axboe Cc: Dave Jones Cc: Jeremy Fitzhardinge Cc: linux-mm Cc: David S. 
Miller Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Li Zefan Cc: Catalin Marinas Cc: Andi Kleen --- arch/x86/kernel/cpu/mcheck/mce.c | 8 ++++---- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 284d1de..cba8cd3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status) */ static int check_interval = 5 * 60; /* 5 minutes */ -static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ +static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); static void mcheck_timer(unsigned long data) @@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data) * Alert userspace if needed. If we logged an MCE, reduce the * polling interval, otherwise increase the polling interval. */ - n = &__get_cpu_var(next_interval); + n = &__get_cpu_var(mce_next_interval); if (mce_notify_irq()) *n = max(*n/2, HZ/100); else @@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) static void mce_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); - int *n = &__get_cpu_var(next_interval); + int *n = &__get_cpu_var(mce_next_interval); if (mce_ignore_ce) return; @@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: t->expires = round_jiffies(jiffies + - __get_cpu_var(next_interval)); + __get_cpu_var(mce_next_interval)); add_timer_on(t, cpu); smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); break; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 4946288..5fdf63a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) x86_pmu_disable_counter(hwc, idx); } -static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left); +static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. 
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, if (left > x86_pmu.max_period) left = x86_pmu.max_period; - per_cpu(prev_left[idx], smp_processor_id()) = left; + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* * The hw counter starts counting from this counter offset, @@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void) rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); rdmsrl(x86_pmu.perfctr + idx, pmc_count); - prev_left = per_cpu(prev_left[idx], cpu); + prev_left = per_cpu(pmc_prev_left[idx], cpu); pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); @@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) entry->ip[entry->nr++] = ip; } -static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); -static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); static void @@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) struct perf_callchain_entry *entry; if (in_nmi()) - entry = &__get_cpu_var(nmi_entry); + entry = &__get_cpu_var(pmc_nmi_entry); else - entry = &__get_cpu_var(irq_entry); + entry = &__get_cpu_var(pmc_irq_entry); entry->nr = 0; -- cgit v1.1 From 6088464cf1ae9fb3d2ccc0ec5feb3f5b971098d8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:52 +0900 Subject: alpha: kill unnecessary __used attribute in PER_CPU_ATTRIBUTES With the previous percpu variable definition change, all percpu variables are global and there's no need to specify __used, which only triggers on recent compilers anyway. Kill it. [ Impact: remove unnecessary percpu attribute ] Signed-off-by: Tejun Heo Cc: Ivan Kokshaysky Cc: Richard Henderson --- arch/alpha/include/asm/percpu.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index 06c5c7a..7f0a9c4 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -30,7 +30,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #ifndef MODULE #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) -#define PER_CPU_ATTRIBUTES #else /* * To calculate addresses of locally defined variables, GCC uses 32-bit @@ -49,8 +48,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; : "=&r"(__ptr), "=&r"(tmp_gp)); \ (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) -#define PER_CPU_ATTRIBUTES __used - #endif /* MODULE */ /* @@ -71,8 +68,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define __get_cpu_var(var) per_cpu_var(var) #define __raw_get_cpu_var(var) per_cpu_var(var) -#define PER_CPU_ATTRIBUTES - #endif /* SMP */ #ifdef CONFIG_SMP -- cgit v1.1 From 9b7dbc7dc0365a943af2d73b1376a6f0aac5dc0d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:52 +0900 Subject: alpha: switch to dynamic percpu allocator Alpha implements custom SHIFT_PERCPU_PTR for modules because percpu area can be located far away from the 4G area where the module text is located. The custom SHIFT_PERCPU_PTR forces GOT usage using ldq instruction with literal relocation; however, the relocation can't be used with dynamically allocated percpu variables. Fortunately, similar result can be achieved by using weak percpu variable definitions. This patch makes alpha use weak definitions and switch to dynamic percpu allocator. 
asm/tlbflush.h was getting linux/sched.h via asm/percpu.h which no longer needs it. Include linux/sched.h directly in asm/tlbflush.h. Compile tested. Generation of litereal relocation verified. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: use dynamic percpu allocator ] Signed-off-by: Tejun Heo Acked-by: Ivan Kokshaysky Cc: Richard Henderson --- arch/alpha/Kconfig | 3 -- arch/alpha/include/asm/percpu.h | 95 ++++----------------------------------- arch/alpha/include/asm/tlbflush.h | 1 + 3 files changed, 9 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 05d8640..9fb8aae 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -70,9 +70,6 @@ config AUTO_IRQ_AFFINITY depends on SMP default y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y - source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index 7f0a9c4..2c12378 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -1,97 +1,18 @@ #ifndef __ALPHA_PERCPU_H #define __ALPHA_PERCPU_H -#include -#include -#include - /* - * Determine the real variable name from the name visible in the - * kernel sources. - */ -#define per_cpu_var(var) per_cpu__##var - -#ifdef CONFIG_SMP - -/* - * per_cpu_offset() is the offset that has to be added to a - * percpu variable to get to the instance for a certain processor. - */ -extern unsigned long __per_cpu_offset[NR_CPUS]; - -#define per_cpu_offset(x) (__per_cpu_offset[x]) - -#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) -#ifdef CONFIG_DEBUG_PREEMPT -#define my_cpu_offset per_cpu_offset(smp_processor_id()) -#else -#define my_cpu_offset __my_cpu_offset -#endif - -#ifndef MODULE -#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) -#else -/* - * To calculate addresses of locally defined variables, GCC uses 32-bit - * displacement from the GP. Which doesn't work for per cpu variables in - * modules, as an offset to the kernel per cpu area is way above 4G. + * To calculate addresses of locally defined variables, GCC uses + * 32-bit displacement from the GP. Which doesn't work for per cpu + * variables in modules, as an offset to the kernel per cpu area is + * way above 4G. * - * This forces allocation of a GOT entry for per cpu variable using - * ldq instruction with a 'literal' relocation. - */ -#define SHIFT_PERCPU_PTR(var, offset) ({ \ - extern int simple_identifier_##var(void); \ - unsigned long __ptr, tmp_gp; \ - asm ( "br %1, 1f \n\ - 1: ldgp %1, 0(%1) \n\ - ldq %0, per_cpu__" #var"(%1)\t!literal" \ - : "=&r"(__ptr), "=&r"(tmp_gp)); \ - (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) - -#endif /* MODULE */ - -/* - * A percpu variable may point to a discarded regions. The following are - * established ways to produce a usable pointer from the percpu variable - * offset. + * Always use weak definitions for percpu variables in modules. */ -#define per_cpu(var, cpu) \ - (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) -#define __get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) -#define __raw_get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) - -#else /* ! 
SMP */ - -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) -#define __get_cpu_var(var) per_cpu_var(var) -#define __raw_get_cpu_var(var) per_cpu_var(var) - -#endif /* SMP */ - -#ifdef CONFIG_SMP -#define PER_CPU_BASE_SECTION ".data.percpu" -#else -#define PER_CPU_BASE_SECTION ".data" -#endif - -#ifdef CONFIG_SMP - -#ifdef MODULE -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#else -#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" -#endif -#define PER_CPU_FIRST_SECTION ".first" - -#else - -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_FIRST_SECTION "" - +#if defined(MODULE) && defined(CONFIG_SMP) +#define ARCH_NEEDS_WEAK_PER_CPU #endif -#define PER_CPU_ATTRIBUTES +#include #endif /* __ALPHA_PERCPU_H */ diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h index 9d87aaa..e89e0c2 100644 --- a/arch/alpha/include/asm/tlbflush.h +++ b/arch/alpha/include/asm/tlbflush.h @@ -2,6 +2,7 @@ #define _ALPHA_TLBFLUSH_H #include +#include #include #include -- cgit v1.1 From 9a0ef2923abd2cc2c6f78d3663ac7af34c0220e8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:53 +0900 Subject: s390: switch to dynamic percpu allocator 64bit s390 shares the same problem with alpha regarding percpu symbol addressing from modules. It needs assembly magic to force GOTENT reference when building module as the percpu address will be outside the usual 4G range from the module text. This can be solved by using weak percpu variable definitions. This patch makes s390 use weak definitions and switch to dynamic percpu allocator. Please note that weak attribute is not added if !SMP as percpu variables behave exactly the same as normal variables on UP. Compile tested. Generation of GOTENT reference verified. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: use dynamic percpu allocator ] Signed-off-by: Tejun Heo Cc: Martin Schwidefsky Cc: Heiko Carstens --- arch/s390/Kconfig | 3 --- arch/s390/include/asm/percpu.h | 32 ++++++++------------------------ 2 files changed, 8 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f4a3cc6..a14dba0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,9 +75,6 @@ config VIRT_CPU_ACCOUNTING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y - mainmenu "Linux Kernel Configuration" config S390 diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 408d60b..f7ad871 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -1,37 +1,21 @@ #ifndef __ARCH_S390_PERCPU__ #define __ARCH_S390_PERCPU__ -#include -#include - /* * s390 uses its own implementation for per cpu data, the offset of * the cpu local data area is cached in the cpu's lowcore memory. - * For 64 bit module code s390 forces the use of a GOT slot for the - * address of the per cpu variable. This is needed because the module - * may be more than 4G above the per cpu area. 
*/ -#if defined(__s390x__) && defined(MODULE) - -#define SHIFT_PERCPU_PTR(ptr,offset) (({ \ - extern int simple_identifier_##var(void); \ - unsigned long *__ptr; \ - asm ( "larl %0, %1@GOTENT" \ - : "=a" (__ptr) : "X" (ptr) ); \ - (typeof(ptr))((*__ptr) + (offset)); })) - -#else - -#define SHIFT_PERCPU_PTR(ptr, offset) (({ \ - extern int simple_identifier_##var(void); \ - unsigned long __ptr; \ - asm ( "" : "=a" (__ptr) : "0" (ptr) ); \ - (typeof(ptr)) (__ptr + (offset)); })) +#define __my_cpu_offset S390_lowcore.percpu_offset +/* + * For 64 bit module code, the module may be more than 4G above the + * per cpu area, use weak definitions to force the compiler to + * generate external references. + */ +#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE) +#define ARCH_NEEDS_WEAK_PER_CPU #endif -#define __my_cpu_offset S390_lowcore.percpu_offset - #include #endif /* __ARCH_S390_PERCPU__ */ -- cgit v1.1 From bf4bb2b1f285ec56e7f3cbf0190761b42131871c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 16:57:03 +0900 Subject: sparc64: fix build breakage introduced by percpu-convert-most patchset Commit e74e396204bfcb67570ba4517b08f5918e69afea incorrectly added HAVE_LEGACY_PER_CPU_AREA to sparc64 although it already has been converted to dynamic percpu allocator. Drop both HAVE_{LEGACY|DYNAMIC}_PER_CPU_AREA. Signed-off-by: Tejun Heo Acked-by: David Miller --- arch/sparc/Kconfig | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 7a8698b..4f6ed0f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -92,15 +92,9 @@ config AUDIT_ARCH bool default y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y if SPARC64 - config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 -config HAVE_DYNAMIC_PER_CPU_AREA - def_bool y if SPARC64 - config GENERIC_HARDIRQS_NO__DO_IRQ bool def_bool y if SPARC64 -- cgit v1.1 From 9fb24cc50045ec8d13d0a6c3d4d454750b466d61 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 27 Jun 2009 14:50:51 +0200 Subject: score: add missing #includes Files that define a global function should #include the header with its declaration to make sure that the prototypes do not diverge. 
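A hypothetical example of the failure mode this guards against (file and function names made up for illustration):

/* foo.h */
long do_foo(unsigned int arg);

/* foo.c -- without #include "foo.h" this definition compiles silently,
 * even though it no longer matches the declaration that callers see: */
long do_foo(unsigned long arg) { return arg; }

/* with the #include in place, the compiler reports the conflicting types. */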
Signed-off-by: Arnd Bergmann --- arch/score/include/asm/ucontext.h | 1 + arch/score/kernel/module.c | 1 + arch/score/kernel/process.c | 3 +++ arch/score/kernel/signal.c | 4 +++- arch/score/kernel/sys_score.c | 2 ++ 5 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 arch/score/include/asm/ucontext.h (limited to 'arch') diff --git a/arch/score/include/asm/ucontext.h b/arch/score/include/asm/ucontext.h new file mode 100644 index 0000000..9bc07b9 --- /dev/null +++ b/arch/score/include/asm/ucontext.h @@ -0,0 +1 @@ +#include diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c index 4ffce7f..1a62557 100644 --- a/arch/score/kernel/module.c +++ b/arch/score/kernel/module.c @@ -23,6 +23,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include #include #include diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index d93966f..25d0803 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -24,6 +24,9 @@ */ #include +#include +#include +#include void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index 5c00408..252f6d9 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -27,7 +27,9 @@ #include #include #include -#include + +#include +#include #include diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 3318861..eb4d2d3 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include unsigned long shm_align_mask = PAGE_SIZE - 1; EXPORT_SYMBOL(shm_align_mask); -- cgit v1.1 From a1f8213b9518d0e9124a48a34bdd58b4bc2650e5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 27 Jun 2009 15:05:30 +0200 Subject: score: add address space annotations Annotate the address space for pointers that are used correctly with __user and __iomem, so that sparse can better warn about incorrect casts. Signed-off-by: Arnd Bergmann --- arch/score/include/asm/uaccess.h | 4 ++-- arch/score/kernel/ptrace.c | 7 ++++--- arch/score/kernel/signal.c | 10 +++++----- arch/score/kernel/sys_score.c | 9 +++++---- 4 files changed, 16 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 43ce28a1d..6f09e2b 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -13,9 +13,9 @@ extern int fixup_exception(struct pt_regs *regs); #ifndef __ASSEMBLY__ #define __range_ok(addr, size) \ - ((((unsigned long)(addr) >= 0x80000000) \ + ((((unsigned long __force)(addr) >= 0x80000000) \ || ((unsigned long)(size) > 0x80000000) \ - || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000))) + || (((unsigned long __force)(addr) + (unsigned long)(size)) > 0x80000000))) #define __access_ok(addr, size) \ (__range_ok((addr), (size)) == 0) diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 19911e3..1db876b 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c @@ -267,6 +267,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; + unsigned long __user *datap = (void __user *)data; switch (request) { /* Read the word at location addr in the USER area. 
*/ @@ -316,7 +317,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) return -EIO; } - ret = put_user(tmp, (unsigned long *) data); + ret = put_user(tmp, (unsigned int __user *) datap); return ret; } @@ -355,11 +356,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) } case PTRACE_GETREGS: - ret = ptrace_getregs(child, (void __user *)data); + ret = ptrace_getregs(child, (void __user *)datap); break; case PTRACE_SETREGS: - ret = ptrace_setregs(child, (void __user *)data); + ret = ptrace_setregs(child, (void __user *)datap); break; default: diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index 252f6d9..1634aaa 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -131,13 +131,13 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) sp = current->sas_ss_sp + current->sas_ss_size; - return (void *)((sp - frame_size) & ~7); + return (void __user*)((sp - frame_size) & ~7); } int score_sigaltstack(struct pt_regs *regs) { - const stack_t *uss = (const stack_t *) regs->regs[4]; - stack_t *uoss = (stack_t *) regs->regs[5]; + const stack_t __user *uss = (const stack_t __user *) regs->regs[4]; + stack_t __user *uoss = (stack_t __user *) regs->regs[5]; unsigned long usp = regs->regs[0]; return do_sigaltstack(uss, uoss, usp); @@ -188,7 +188,7 @@ badframe: int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); @@ -209,7 +209,7 @@ int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= copy_siginfo_to_user(&frame->rs_info, info); err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(0, &frame->rs_uc.uc_link); - err |= __put_user((void *)current->sas_ss_sp, + err |= __put_user((void __user *)current->sas_ss_sp, &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[0]), &frame->rs_uc.uc_stack.ss_flags); diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index eb4d2d3..16ace29 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -87,18 +87,19 @@ int score_clone(struct pt_regs *regs) * sys_execve() executes a new program. * This is called indirectly via a small wrapper */ -int score_execve(struct pt_regs *regs) +asmlinkage long +score_execve(struct pt_regs *regs) { int error; char *filename; - filename = getname((char *) (long) regs->regs[4]); + filename = getname((char __user*)regs->regs[4]); error = PTR_ERR(filename); if (IS_ERR(filename)) return error; - error = do_execve(filename, (char **) (long) regs->regs[5], - (char **) (long) regs->regs[6], regs); + error = do_execve(filename, (char __user *__user*)regs->regs[5], + (char __user *__user *) regs->regs[6], regs); putname(filename); return error; -- cgit v1.1 From bddc605955bca2d914ca621a7ef4ca6c271f55d8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 27 Jun 2009 15:12:16 +0200 Subject: score: fix function prototypes Syscalls should return 'long' and be marked as 'asmlinkage'. Functions that are only used in a single file should be 'static'. 
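The conventions being enforced, in sketch form (signatures taken from the diff below):

/* syscall entry points: asmlinkage, returning long */
asmlinkage long score_sigaltstack(struct pt_regs *regs);
asmlinkage long score_rt_sigreturn(struct pt_regs *regs);

/* helpers used only within signal.c: static */
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc);
static void do_signal(struct pt_regs *regs);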
Signed-off-by: Arnd Bergmann --- arch/score/kernel/signal.c | 22 +++++++++++++--------- arch/score/kernel/sys_score.c | 5 +++-- arch/score/kernel/time.c | 2 +- 3 files changed, 17 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index 1634aaa..afbfe33 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -42,7 +42,7 @@ struct rt_sigframe { struct ucontext rs_uc; }; -int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; unsigned long reg; @@ -76,7 +76,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) return err; } -int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; u32 reg; @@ -118,8 +118,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) /* * Determine which stack to use.. */ -void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, - size_t frame_size) +static void __user *get_sigframe(struct k_sigaction *ka, + struct pt_regs *regs, size_t frame_size) { unsigned long sp; @@ -134,7 +134,8 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return (void __user*)((sp - frame_size) & ~7); } -int score_sigaltstack(struct pt_regs *regs) +asmlinkage long +score_sigaltstack(struct pt_regs *regs) { const stack_t __user *uss = (const stack_t __user *) regs->regs[4]; stack_t __user *uoss = (stack_t __user *) regs->regs[5]; @@ -143,7 +144,8 @@ int score_sigaltstack(struct pt_regs *regs) return do_sigaltstack(uss, uoss, usp); } -void score_rt_sigreturn(struct pt_regs *regs) +asmlinkage long +score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; @@ -183,9 +185,11 @@ void score_rt_sigreturn(struct pt_regs *regs) badframe: force_sig(SIGSEGV, current); + + return 0; } -int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, +static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe __user *frame; @@ -238,7 +242,7 @@ give_sigsegv: return -EFAULT; } -int handle_signal(unsigned long sig, siginfo_t *info, +static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; @@ -278,7 +282,7 @@ int handle_signal(unsigned long sig, siginfo_t *info, return ret; } -void do_signal(struct pt_regs *regs) +static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 16ace29..5b3cc4e 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -34,7 +34,7 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; EXPORT_SYMBOL(shm_align_mask); -asmlinkage unsigned long +asmlinkage unsigned sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { @@ -66,7 +66,8 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, * Clone a task - this clones the calling program thread. 
* This is called indirectly via a small wrapper */ -int score_clone(struct pt_regs *regs) +asmlinkage long +score_clone(struct pt_regs *regs) { unsigned long clone_flags; unsigned long newsp; diff --git a/arch/score/kernel/time.c b/arch/score/kernel/time.c index cd66ba3..f0a43aff 100644 --- a/arch/score/kernel/time.c +++ b/arch/score/kernel/time.c @@ -28,7 +28,7 @@ #include -irqreturn_t timer_interrupt(int irq, void *dev_id) +static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evdev = dev_id; -- cgit v1.1 From c6067472252c1d6155c7c01c93e0d580342cdb29 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 27 Jun 2009 14:46:35 +0200 Subject: score: cleanups: dead code, 0 as pointer, shadowed variables A few smaller issues found by sparse, some code that was never used, two instances of '0' instead of 'NULL' and local variables shadowing another one. Signed-off-by: Arnd Bergmann --- arch/score/kernel/module.c | 12 ++++++------ arch/score/kernel/signal.c | 2 +- arch/score/kernel/sys_score.c | 14 +------------- 3 files changed, 8 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c index 1a62557..4de8d47 100644 --- a/arch/score/kernel/module.c +++ b/arch/score/kernel/module.c @@ -57,17 +57,17 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { unsigned long loc; Elf32_Sym *sym; - s32 offset; + s32 r_offset; - offset = ELF32_R_SYM(rel->r_info); - if ((offset < 0) || - (offset > (symsec->sh_size / sizeof(Elf32_Sym)))) { + r_offset = ELF32_R_SYM(rel->r_info); + if ((r_offset < 0) || + (r_offset > (symsec->sh_size / sizeof(Elf32_Sym)))) { printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", me->name, relindex, i); return -ENOEXEC; } - sym = ((Elf32_Sym *)symsec->sh_addr) + offset; + sym = ((Elf32_Sym *)symsec->sh_addr) + r_offset; if ((rel->r_offset < 0) || (rel->r_offset > dstsec->sh_size - sizeof(u32))) { @@ -152,7 +152,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_dbetables(unsigned long addr) { - return 0; + return NULL; } /* Put in dbe list if necessary. */ diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index afbfe33..950f87c 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -212,7 +212,7 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= copy_siginfo_to_user(&frame->rs_info, info); err |= __put_user(0, &frame->rs_uc.uc_flags); - err |= __put_user(0, &frame->rs_uc.uc_link); + err |= __put_user(NULL, &frame->rs_uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[0]), diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 5b3cc4e..61aff8a 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -31,10 +31,7 @@ #include #include -unsigned long shm_align_mask = PAGE_SIZE - 1; -EXPORT_SYMBOL(shm_align_mask); - -asmlinkage unsigned +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { @@ -107,15 +104,6 @@ score_execve(struct pt_regs *regs) } /* - * If we ever come here the user sp is bad. Zap the process right away. - * Due to the bad stack signaling wouldn't work. 
- */ -void bad_stack(void) -{ - do_exit(SIGSEGV); -} - -/* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ -- cgit v1.1 From 9b05706a744da939655525eeeae23f1989b434ce Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 27 Jun 2009 15:22:00 +0200 Subject: score: make irq.h definitions local Some internal definitions of the interrupt controller are only needed in irq.c, so move them out of the global irq.h header. Also add proper __iomem annotations for sparse. Signed-off-by: Arnd Bergmann --- arch/score/include/asm/irq.h | 10 ---------- arch/score/kernel/irq.c | 33 +++++++++++++++++++++++---------- 2 files changed, 23 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/irq.h b/arch/score/include/asm/irq.h index 401f670..6edd2a2 100644 --- a/arch/score/include/asm/irq.h +++ b/arch/score/include/asm/irq.h @@ -18,16 +18,6 @@ #define irq_canonicalize(irq) (irq) -#define P_INT_PNDL 0x95F50000 -#define P_INT_PNDH 0x95F50004 -#define P_INT_PRIORITY_M 0x95F50008 -#define P_INT_PRIORITY_SG0 0x95F50010 -#define P_INT_PRIORITY_SG1 0x95F50014 -#define P_INT_PRIORITY_SG2 0x95F50018 -#define P_INT_PRIORITY_SG3 0x95F5001C -#define P_INT_MASKL 0x95F50020 -#define P_INT_MASKH 0x95F50024 - #define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */ #endif /* _ASM_SCORE_IRQ_H */ diff --git a/arch/score/kernel/irq.c b/arch/score/kernel/irq.c index 55474e8..47647dd 100644 --- a/arch/score/kernel/irq.c +++ b/arch/score/kernel/irq.c @@ -29,6 +29,19 @@ #include +/* the interrupt controller is hardcoded at this address */ +#define SCORE_PIC ((u32 __iomem __force *)0x95F50000) + +#define INT_PNDL 0 +#define INT_PNDH 1 +#define INT_PRIORITY_M 2 +#define INT_PRIORITY_SG0 4 +#define INT_PRIORITY_SG1 5 +#define INT_PRIORITY_SG2 6 +#define INT_PRIORITY_SG3 7 +#define INT_MASKL 8 +#define INT_MASKH 9 + /* * handles all normal device IRQs */ @@ -44,11 +57,11 @@ static void score_mask(unsigned int irq_nr) unsigned int irq_source = 63 - irq_nr; if (irq_source < 32) - __raw_writel((__raw_readl((void *)P_INT_MASKL) | \ - (1 << irq_source)), (void *)P_INT_MASKL); + __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \ + (1 << irq_source)), SCORE_PIC + INT_MASKL); else - __raw_writel((__raw_readl((void *)P_INT_MASKH) | \ - (1 << (irq_source - 32))), (void *)P_INT_MASKH); + __raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) | \ + (1 << (irq_source - 32))), SCORE_PIC + INT_MASKH); } static void score_unmask(unsigned int irq_nr) @@ -56,11 +69,11 @@ static void score_unmask(unsigned int irq_nr) unsigned int irq_source = 63 - irq_nr; if (irq_source < 32) - __raw_writel((__raw_readl((void *)P_INT_MASKL) & \ - ~(1 << irq_source)), (void *)P_INT_MASKL); + __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \ + ~(1 << irq_source)), SCORE_PIC + INT_MASKL); else - __raw_writel((__raw_readl((void *)P_INT_MASKH) & \ - ~(1 << (irq_source - 32))), (void *)P_INT_MASKH); + __raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) & \ + ~(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH); } struct irq_chip score_irq_chip = { @@ -88,8 +101,8 @@ void __init init_IRQ(void) memcpy((void *)target_addr, \ interrupt_exception_vector, IRQ_VECTOR_SIZE); - __raw_writel(0xffffffff, (void *)P_INT_MASKL); - __raw_writel(0xffffffff, (void *)P_INT_MASKH); + __raw_writel(0xffffffff, SCORE_PIC + INT_MASKL); + __raw_writel(0xffffffff, SCORE_PIC + INT_MASKH); __asm__ __volatile__( "mtcr %0, cr3\n\t" -- cgit v1.1 From fbd85b0e26bab0a13dcf860f2c20e86cb0507b61 Mon Sep 17 00:00:00 2001 From: Arnd 
Bergmann Date: Sat, 27 Jun 2009 15:58:13 +0200 Subject: score: clean up mm/init.c score does not need multiple zero pages, because it does not suffer from cache aliasing problems, so simplify that code. Also make some functions static and include the appropriate header files. Signed-off-by: Arnd Bergmann --- arch/score/mm/init.c | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index 7780eec..d496e9f 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c @@ -32,44 +32,30 @@ #include #include #include -#include +#include +#include #include DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -/* - * We have up to 8 empty zeroed pages so we can map one of the right colour - * when needed. - */ -unsigned long zero_page_mask; unsigned long empty_zero_page; EXPORT_SYMBOL_GPL(empty_zero_page); static struct kcore_list kcore_mem, kcore_vmalloc; -unsigned long setup_zero_pages(void) +static unsigned long setup_zero_page(void) { - unsigned int order = 0; - unsigned long size; struct page *page; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); if (!empty_zero_page) panic("Oh boy, that early out of memory?"); page = virt_to_page((void *) empty_zero_page); - split_page(page, order); - while (page < virt_to_page((void *) (empty_zero_page + - (PAGE_SIZE << order)))) { - SetPageReserved(page); - page++; - } - - size = PAGE_SIZE << order; - zero_page_mask = (size - 1) & PAGE_MASK; + SetPageReserved(page); - return 1UL << order; + return 1UL; } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -100,7 +86,7 @@ void __init mem_init(void) max_mapnr = max_low_pfn; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); totalram_pages += free_all_bootmem(); - totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) @@ -129,7 +115,7 @@ void __init mem_init(void) } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ -void free_init_pages(const char *what, unsigned long begin, unsigned long end) +static void free_init_pages(const char *what, unsigned long begin, unsigned long end) { unsigned long pfn; @@ -150,8 +136,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) void free_initrd_mem(unsigned long start, unsigned long end) { free_init_pages("initrd memory", - virt_to_phys((void *) start), - virt_to_phys((void *) end)); + virt_to_phys((void *) start), + virt_to_phys((void *) end)); } #endif -- cgit v1.1 From a4a874a906ae69c35df4b712fadbc35b15665355 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Thu, 18 Jun 2009 07:05:46 +0800 Subject: kmemcheck: remove duplicated #include Remove duplicated #include in arch/x86/mm/kmemcheck/shadow.c. Signed-off-by: Huang Weiyi Acked-by: Pekka Enberg Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/shadow.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index e773b6b..3f66b82 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -1,7 +1,6 @@ #include #include #include -#include #include #include -- cgit v1.1 From 414f3251aa1b4cbd1e070866971eabc004a7dc20 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 22 Jun 2009 14:31:53 +0200 Subject: kmemcheck: remove useless check This check is a left-over from ancient times. 
We now have the equivalent check much earlier in both the page fault handler and the debug trap handler (the calls to kmemcheck_active()). Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 2c55ed0..5b99004 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -225,9 +225,6 @@ void kmemcheck_hide(struct pt_regs *regs) BUG_ON(!irqs_disabled()); - if (data->balance == 0) - return; - if (unlikely(data->balance != 1)) { kmemcheck_show_all(); kmemcheck_error_save_bug(regs); -- cgit v1.1 From c601a51af10f714292f42eab45fa8c9154dc1414 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 3 Jul 2009 16:16:54 +0900 Subject: sh: Use bootmem ontop of lmb Rework the bootmem allocator to use the lmb framework. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/lmb.h | 6 ++++ arch/sh/kernel/setup.c | 71 ++++++++++++++++++++++++++++++++++------------- 3 files changed, 59 insertions(+), 19 deletions(-) create mode 100644 arch/sh/include/asm/lmb.h (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e2bdd7b..0fb99b0 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -10,6 +10,7 @@ config SUPERH select EMBEDDED select HAVE_CLK select HAVE_IDE + select HAVE_LMB select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT select HAVE_IOREMAP_PROT if MMU diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h new file mode 100644 index 0000000..9b437f6 --- /dev/null +++ b/arch/sh/include/asm/lmb.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH_LMB_H +#define __ASM_SH_LMB_H + +#define LMB_REAL_LIMIT 0 + +#endif /* __ASM_SH_LMB_H */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index dd38338..ceb409b 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -233,39 +234,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, void __init setup_bootmem_allocator(unsigned long free_pfn) { unsigned long bootmap_size; + unsigned long bootmap_pages, bootmem_paddr; + u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT; + int i; + + bootmap_pages = bootmem_bootmap_pages(total_pages); + + bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); /* * Find a proper area for the bootmem bitmap. After this * bootstrap step all allocations (until the page allocator * is intact) must be done via bootmem_alloc(). */ - bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn, + bootmap_size = init_bootmem_node(NODE_DATA(0), + bootmem_paddr >> PAGE_SHIFT, min_low_pfn, max_low_pfn); - __add_active_range(0, min_low_pfn, max_low_pfn); - register_bootmem_low_pages(); - - node_set_online(0); + /* Add active regions with valid PFNs. */ + for (i = 0; i < lmb.memory.cnt; i++) { + unsigned long start_pfn, end_pfn; + start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + __add_active_range(0, start_pfn, end_pfn); + } /* - * Reserve the kernel text and - * Reserve the bootmem bitmap. We do this in two steps (first step - * was init_bootmem()), because this catches the (definitely buggy) - * case of us accidentally initializing the bootmem allocator with - * an invalid RAM area. + * Add all physical memory to the bootmem map and mark each + * area as present. 
*/ - reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, - (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) - - (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET), - BOOTMEM_DEFAULT); + register_bootmem_low_pages(); - /* - * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. - */ - if (CONFIG_ZERO_PAGE_OFFSET != 0) - reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET, + /* Reserve the sections we're already using. */ + for (i = 0; i < lmb.reserved.cnt; i++) + reserve_bootmem(lmb.reserved.region[i].base, + lmb_size_bytes(&lmb.reserved, i), BOOTMEM_DEFAULT); + node_set_online(0); + sparse_memory_present_with_active_regions(0); #ifdef CONFIG_BLK_DEV_INITRD @@ -296,12 +303,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) static void __init setup_memory(void) { unsigned long start_pfn; + u64 base = min_low_pfn << PAGE_SHIFT; + u64 size = (max_low_pfn << PAGE_SHIFT) - base; /* * Partially used pages are not usable - thus * we are rounding upwards: */ start_pfn = PFN_UP(__pa(_end)); + + lmb_add(base, size); + + /* + * Reserve the kernel text and + * Reserve the bootmem bitmap. We do this in two steps (first step + * was init_bootmem()), because this catches the (definitely buggy) + * case of us accidentally initializing the bootmem allocator with + * an invalid RAM area. + */ + lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - + (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); + + /* + * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. + */ + if (CONFIG_ZERO_PAGE_OFFSET != 0) + lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); + + lmb_analyze(); + lmb_dump_all(); + setup_bootmem_allocator(start_pfn); } #else @@ -402,6 +434,7 @@ void __init setup_arch(char **cmdline_p) nodes_clear(node_online_map); /* Setup bootmem with available RAM */ + lmb_init(); setup_memory(); sparse_init(); -- cgit v1.1 From 788e5abc5441e9046dd91c995c6f1f75bbd144bf Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:10:58 +0900 Subject: percpu: drop @unit_size from embed first chunk allocator The only extra feature @unit_size provides is making dead space at the end of the first chunk which doesn't have any valid usecase. Drop the parameter. This will increase consistency with generalized 4k allocator. James Bottomley spotted missing conversion for the default setup_per_cpu_areas() which caused build breakage on all arcsh which use it. [ Impact: drop unused code path ] Signed-off-by: Tejun Heo Cc: James Bottomley Cc: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 29a3eef..1472820 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -342,7 +342,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen) return -EINVAL; return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, - reserve - PERCPU_FIRST_CHUNK_RESERVE, -1); + reserve - PERCPU_FIRST_CHUNK_RESERVE); } /* -- cgit v1.1 From d4b95f80399471e4bce5e992700ff7f06ef91f6a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:10:59 +0900 Subject: x86,percpu: generalize 4k first chunk allocator Generalize and move x86 setup_pcpu_4k() into pcpu_4k_first_chunk(). setup_pcpu_4k() now is a simple wrapper around the generalized version. 
Other than taking size parameters and using arch supplied callbacks to allocate/free memory, pcpu_4k_first_chunk() is identical to the original implementation. This simplifies arch code and will help converting more archs to dynamic percpu allocator. While at it, s/pcpu_populate_pte_fn_t/pcpu_fc_populate_pte_fn_t/ for consistency. [ Impact: code reorganization and generalization ] Signed-off-by: Tejun Heo Cc: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 78 ++++++++++-------------------------------- 1 file changed, 19 insertions(+), 59 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 1472820..ab896b3 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -124,6 +124,19 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, } /* + * Helpers for first chunk memory allocation + */ +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size) +{ + return pcpu_alloc_bootmem(cpu, size, size); +} + +static void __init pcpu_fc_free(void *ptr, size_t size) +{ + free_bootmem(__pa(ptr), size); +} + +/* * Large page remap allocator * * This allocator uses PMD page as unit. A PMD page is allocated for @@ -346,22 +359,11 @@ static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen) } /* - * 4k page allocator + * 4k allocator * - * This is the basic allocator. Static percpu area is allocated - * page-by-page and most of initialization is done by the generic - * setup function. + * Boring fallback 4k allocator. This allocator puts more pressure on + * PTE TLBs but other than that behaves nicely on both UMA and NUMA. */ -static struct page **pcpu4k_pages __initdata; -static int pcpu4k_nr_static_pages __initdata; - -static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno) -{ - if (pageno < pcpu4k_nr_static_pages) - return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno]; - return NULL; -} - static void __init pcpu4k_populate_pte(unsigned long addr) { populate_extra_pte(addr); @@ -369,51 +371,9 @@ static void __init pcpu4k_populate_pte(unsigned long addr) static ssize_t __init setup_pcpu_4k(size_t static_size) { - size_t pages_size; - unsigned int cpu; - int i, j; - ssize_t ret; - - pcpu4k_nr_static_pages = PFN_UP(static_size); - - /* unaligned allocations can't be freed, round up to page size */ - pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus() - * sizeof(pcpu4k_pages[0])); - pcpu4k_pages = alloc_bootmem(pages_size); - - /* allocate and copy */ - j = 0; - for_each_possible_cpu(cpu) - for (i = 0; i < pcpu4k_nr_static_pages; i++) { - void *ptr; - - ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); - if (!ptr) { - pr_warning("PERCPU: failed to allocate " - "4k page for cpu%u\n", cpu); - goto enomem; - } - - memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); - pcpu4k_pages[j++] = virt_to_page(ptr); - } - - /* we're ready, commit */ - pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", - pcpu4k_nr_static_pages, static_size); - - ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, -1, - -1, NULL, pcpu4k_populate_pte); - goto out_free_ar; - -enomem: - while (--j >= 0) - free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE); - ret = -ENOMEM; -out_free_ar: - free_bootmem(__pa(pcpu4k_pages), pages_size); - return ret; + return pcpu_4k_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, + pcpu_fc_alloc, pcpu_fc_free, + pcpu4k_populate_pte); } /* for explicit first chunk 
allocator selection */ -- cgit v1.1 From 8c4bfc6e8801616ab2e01c38140b2159b388d2ff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:10:59 +0900 Subject: x86,percpu: generalize lpage first chunk allocator Generalize and move x86 setup_pcpu_lpage() into pcpu_lpage_first_chunk(). setup_pcpu_lpage() now is a simple wrapper around the generalized version. Other than taking size parameters and using arch supplied callbacks to allocate/free/map memory, pcpu_lpage_first_chunk() is identical to the original implementation. This simplifies arch code and will help converting more archs to dynamic percpu allocator. While at it, factor out pcpu_calc_fc_sizes() which is common to pcpu_embed_first_chunk() and pcpu_lpage_first_chunk(). [ Impact: code reorganization and generalization ] Signed-off-by: Tejun Heo Cc: Ingo Molnar --- arch/x86/include/asm/percpu.h | 9 --- arch/x86/kernel/setup_percpu.c | 169 +++-------------------------------------- arch/x86/mm/pageattr.c | 1 + 3 files changed, 12 insertions(+), 167 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 103f1dd..a18c038 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -156,15 +156,6 @@ do { \ /* We can use this directly for local CPU (faster). */ DECLARE_PER_CPU(unsigned long, this_cpu_off); -#ifdef CONFIG_NEED_MULTIPLE_NODES -void *pcpu_lpage_remapped(void *kaddr); -#else -static inline void *pcpu_lpage_remapped(void *kaddr) -{ - return NULL; -} -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ab896b3..4f2e0ac 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -137,44 +137,21 @@ static void __init pcpu_fc_free(void *ptr, size_t size) } /* - * Large page remap allocator - * - * This allocator uses PMD page as unit. A PMD page is allocated for - * each cpu and each is remapped into vmalloc area using PMD mapping. - * As PMD page is quite large, only part of it is used for the first - * chunk. Unused part is returned to the bootmem allocator. - * - * So, the PMD pages are mapped twice - once to the physical mapping - * and to the vmalloc area for the first percpu chunk. The double - * mapping does add one more PMD TLB entry pressure but still is much - * better than only using 4k mappings while still being NUMA friendly. + * Large page remapping allocator */ #ifdef CONFIG_NEED_MULTIPLE_NODES -struct pcpul_ent { - unsigned int cpu; - void *ptr; -}; - -static size_t pcpul_size; -static struct pcpul_ent *pcpul_map; -static struct vm_struct pcpul_vm; - -static struct page * __init pcpul_get_page(unsigned int cpu, int pageno) +static void __init pcpul_map(void *ptr, size_t size, void *addr) { - size_t off = (size_t)pageno << PAGE_SHIFT; + pmd_t *pmd, pmd_v; - if (off >= pcpul_size) - return NULL; - - return virt_to_page(pcpul_map[cpu].ptr + off); + pmd = populate_extra_pmd((unsigned long)addr); + pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE); + set_pmd(pmd, pmd_v); } static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) { - size_t map_size, dyn_size; - unsigned int cpu; - int i, j; - ssize_t ret; + size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; if (!chosen) { size_t vm_size = VMALLOC_END - VMALLOC_START; @@ -198,134 +175,10 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) return -EINVAL; } - /* - * Currently supports only single page. 
Supporting multiple - * pages won't be too difficult if it ever becomes necessary. - */ - pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + - PERCPU_DYNAMIC_RESERVE); - if (pcpul_size > PMD_SIZE) { - pr_warning("PERCPU: static data is larger than large page, " - "can't use large page\n"); - return -EINVAL; - } - dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; - - /* allocate pointer array and alloc large pages */ - map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0])); - pcpul_map = alloc_bootmem(map_size); - - for_each_possible_cpu(cpu) { - pcpul_map[cpu].cpu = cpu; - pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE, - PMD_SIZE); - if (!pcpul_map[cpu].ptr) { - pr_warning("PERCPU: failed to allocate large page " - "for cpu%u\n", cpu); - goto enomem; - } - - /* - * Only use pcpul_size bytes and give back the rest. - * - * Ingo: The 2MB up-rounding bootmem is needed to make - * sure the partial 2MB page is still fully RAM - it's - * not well-specified to have a PAT-incompatible area - * (unmapped RAM, device memory, etc.) in that hole. - */ - free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size), - PMD_SIZE - pcpul_size); - - memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size); - } - - /* allocate address and map */ - pcpul_vm.flags = VM_ALLOC; - pcpul_vm.size = num_possible_cpus() * PMD_SIZE; - vm_area_register_early(&pcpul_vm, PMD_SIZE); - - for_each_possible_cpu(cpu) { - pmd_t *pmd, pmd_v; - - pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr + - cpu * PMD_SIZE); - pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)), - PAGE_KERNEL_LARGE); - set_pmd(pmd, pmd_v); - } - - /* we're ready, commit */ - pr_info("PERCPU: Remapped at %p with large pages, static data " - "%zu bytes\n", pcpul_vm.addr, static_size); - - ret = pcpu_setup_first_chunk(pcpul_get_page, static_size, - PERCPU_FIRST_CHUNK_RESERVE, dyn_size, - PMD_SIZE, pcpul_vm.addr, NULL); - - /* sort pcpul_map array for pcpu_lpage_remapped() */ - for (i = 0; i < num_possible_cpus() - 1; i++) - for (j = i + 1; j < num_possible_cpus(); j++) - if (pcpul_map[i].ptr > pcpul_map[j].ptr) { - struct pcpul_ent tmp = pcpul_map[i]; - pcpul_map[i] = pcpul_map[j]; - pcpul_map[j] = tmp; - } - - return ret; - -enomem: - for_each_possible_cpu(cpu) - if (pcpul_map[cpu].ptr) - free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size); - free_bootmem(__pa(pcpul_map), map_size); - return -ENOMEM; -} - -/** - * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area - * @kaddr: the kernel address in question - * - * Determine whether @kaddr falls in the pcpul recycled area. This is - * used by pageattr to detect VM aliases and break up the pcpu PMD - * mapping such that the same physical page is not mapped under - * different attributes. - * - * The recycled area is always at the tail of a partially used PMD - * page. - * - * RETURNS: - * Address of corresponding remapped pcpu address if match is found; - * otherwise, NULL. - */ -void *pcpu_lpage_remapped(void *kaddr) -{ - void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK); - unsigned long offset = (unsigned long)kaddr & ~PMD_MASK; - int left = 0, right = num_possible_cpus() - 1; - int pos; - - /* pcpul in use at all? 
*/ - if (!pcpul_map) - return NULL; - - /* okay, perform binary search */ - while (left <= right) { - pos = (left + right) / 2; - - if (pcpul_map[pos].ptr < pmd_addr) - left = pos + 1; - else if (pcpul_map[pos].ptr > pmd_addr) - right = pos - 1; - else { - /* it shouldn't be in the area for the first chunk */ - WARN_ON(offset < pcpul_size); - - return pcpul_vm.addr + - pcpul_map[pos].cpu * PMD_SIZE + offset; - } - } - - return NULL; + return pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, + reserve - PERCPU_FIRST_CHUNK_RESERVE, + PMD_SIZE, + pcpu_fc_alloc, pcpu_fc_free, pcpul_map); } #else static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1b734d7..c106f78 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include -- cgit v1.1 From 38a6be525460f52ac6f2de1c3f73c5615a8853cd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:10:59 +0900 Subject: percpu: simplify pcpu_setup_first_chunk() Now that all first chunk allocator helpers allocate and map the first chunk themselves, there's no need to have optional default alloc/map in pcpu_setup_first_chunk(). Drop @populate_pte_fn and only leave @dyn_size optional and make all other params mandatory. This makes it much easier to follow what pcpu_setup_first_chunk() is doing and what actual differences tweaking each parameter results in. [ Impact: drop unused code path ] Signed-off-by: Tejun Heo Cc: Ingo Molnar --- arch/sparc/kernel/smp_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index fa44eaf..ccad7b2 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1528,7 +1528,7 @@ void __init setup_per_cpu_areas(void) pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size, PERCPU_MODULE_RESERVE, dyn_size, - PCPU_CHUNK_SIZE, vm.addr, NULL); + PCPU_CHUNK_SIZE, vm.addr); free_bootmem(__pa(pcpur_ptrs), ptrs_size); -- cgit v1.1 From ce3141a277ff6cc37e51008b8888dc2cb7456ef1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:11:00 +0900 Subject: percpu: drop pcpu_chunk->page[] percpu core doesn't need to tack all the allocated pages. It needs to know whether certain pages are populated and a way to reverse map address to page when freeing. This patch drops pcpu_chunk->page[] and use populated bitmap and vmalloc_to_page() lookup instead. Using vmalloc_to_page() exclusively is also possible but complicates first chunk handling, inflates cache footprint and prevents non-standard memory allocation for percpu memory. pcpu_chunk->page[] was used to track each page's allocation and allowed asymmetric population which happens during failure path; however, with single bitmap for all units, this is no longer possible. Bite the bullet and rewrite (de)populate functions so that things are done in clearly separated steps such that asymmetric population doesn't happen. This makes the (de)population process much more modular and will also ease implementing non-standard memory usage in the future (e.g. large pages). This makes @get_page_fn parameter to pcpu_setup_first_chunk() unnecessary. The parameter is dropped and all first chunk helpers are updated accordingly. Please note that despite the volume most changes to first chunk helpers are symbol renames for variables which don't need to be referenced outside of the helper anymore. 
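To make the reverse-map idea concrete, a rough C sketch follows; the structure and helper names are illustrative only and do not match the real mm/percpu.c code:

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: a chunk keeps one bit per unit page and recovers the
 * struct page on demand instead of caching it in a page[] array. */
struct chunk_sketch {
	void		*base_addr;	/* start of the chunk's mapping */
	unsigned long	*populated;	/* one bit per unit page */
};

static struct page *chunk_page(struct chunk_sketch *chunk, void *addr)
{
	int idx = (addr - chunk->base_addr) >> PAGE_SHIFT;

	if (!test_bit(idx, chunk->populated))
		return NULL;		/* never populated, nothing to unmap */

	return vmalloc_to_page(addr);	/* reverse map on demand */
}

The real code additionally walks the bitmap in ranges so that population and depopulation happen in the clearly separated steps described above.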
This change reduces memory usage and cache footprint of pcpu_chunk. Now only #unit_pages bits are necessary per chunk. [ Impact: reduced memory usage and cache footprint for bookkeeping ] Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: David Miller --- arch/sparc/kernel/smp_64.c | 42 +++++++++++++++--------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index ccad7b2..f2f22ee 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1415,19 +1415,6 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, #endif } -static size_t pcpur_size __initdata; -static void **pcpur_ptrs __initdata; - -static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) -{ - size_t off = (size_t)pageno << PAGE_SHIFT; - - if (off >= pcpur_size) - return NULL; - - return virt_to_page(pcpur_ptrs[cpu] + off); -} - #define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL) static void __init pcpu_map_range(unsigned long start, unsigned long end, @@ -1491,25 +1478,26 @@ void __init setup_per_cpu_areas(void) size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start; static struct vm_struct vm; unsigned long delta, cpu; - size_t pcpu_unit_size; + size_t size_sum, pcpu_unit_size; size_t ptrs_size; + void **ptrs; - pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + - PERCPU_DYNAMIC_RESERVE); - dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE; + size_sum = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + + PERCPU_DYNAMIC_RESERVE); + dyn_size = size_sum - static_size - PERCPU_MODULE_RESERVE; - ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); - pcpur_ptrs = alloc_bootmem(ptrs_size); + ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(ptrs[0])); + ptrs = alloc_bootmem(ptrs_size); for_each_possible_cpu(cpu) { - pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, - PCPU_CHUNK_SIZE); + ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, + PCPU_CHUNK_SIZE); - free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), - PCPU_CHUNK_SIZE - pcpur_size); + free_bootmem(__pa(ptrs[cpu] + size_sum), + PCPU_CHUNK_SIZE - size_sum); - memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); + memcpy(ptrs[cpu], __per_cpu_load, static_size); } /* allocate address and map */ @@ -1523,14 +1511,14 @@ void __init setup_per_cpu_areas(void) start += cpu * PCPU_CHUNK_SIZE; end = start + PCPU_CHUNK_SIZE; - pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu])); + pcpu_map_range(start, end, virt_to_page(ptrs[cpu])); } - pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size, + pcpu_unit_size = pcpu_setup_first_chunk(static_size, PERCPU_MODULE_RESERVE, dyn_size, PCPU_CHUNK_SIZE, vm.addr); - free_bootmem(__pa(pcpur_ptrs), ptrs_size); + free_bootmem(__pa(ptrs), ptrs_size); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { -- cgit v1.1 From 2f39e637ea240efb74cf807d31c93a71a0b89174 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:11:00 +0900 Subject: percpu: allow non-linear / sparse cpu -> unit mapping Currently cpu and unit are always identity mapped. To allow more efficient large page support on NUMA and lazy allocation for possible but offline cpus, cpu -> unit mapping needs to be non-linear and/or sparse. This can be easily implemented by adding a cpu -> unit mapping array and using it whenever looking up the matching unit for a cpu. 
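A minimal sketch of that indirection follows (hypothetical helper; it mirrors the per_cpu_offset() change visible in the x86 hunk of a later patch, where the offset becomes delta + pcpu_unit_map[cpu] * pcpu_unit_size):

#include <linux/threads.h>

/* Sketch only: with an identity map unit_map[cpu] == cpu and nothing
 * changes; a sparse or grouped map simply alters which unit a given
 * cpu resolves to. */
static int unit_map[NR_CPUS];	/* filled in by the first chunk allocator */

static void *unit_base(void *pcpu_base_addr, size_t unit_size,
		       unsigned int cpu)
{
	return pcpu_base_addr + unit_map[cpu] * unit_size;
}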
The only unusal conversion is in pcpu_chunk_addr_search(). The passed in address is unit0 based and unit0 might not be in use so it needs to be converted to address of an in-use unit. This is easily done by adding the unit offset for the current processor. [ Impact: allows non-linear/sparse cpu -> unit mapping, no visible change yet ] Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: David Miller --- arch/sparc/kernel/smp_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index f2f22ee..6970333 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1516,7 +1516,7 @@ void __init setup_per_cpu_areas(void) pcpu_unit_size = pcpu_setup_first_chunk(static_size, PERCPU_MODULE_RESERVE, dyn_size, - PCPU_CHUNK_SIZE, vm.addr); + PCPU_CHUNK_SIZE, vm.addr, NULL); free_bootmem(__pa(ptrs), ptrs_size); -- cgit v1.1 From a530b7958612bafe2027e21359083dba84f0b3b4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 4 Jul 2009 08:11:00 +0900 Subject: percpu: teach large page allocator about NUMA Large page first chunk allocator is primarily used for NUMA machines; however, its NUMA handling is extremely simplistic. Regardless of their proximity, each cpu is put into separate large page just to return most of the allocated space back wasting large amount of vmalloc space and increasing cache footprint. This patch teachs NUMA details to large page allocator. Given processor proximity information, pcpu_lpage_build_unit_map() will find fitting cpu -> unit mapping in which cpus in LOCAL_DISTANCE share the same large page and not too much virtual address space is wasted. This greatly reduces the unit and thus chunk size and wastes much less address space for the first chunk. For example, on 4/4 NUMA machine, the original code occupied 16MB of virtual space for the first chunk while the new code only uses 4MB - one 2MB page for each node. 
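Reading "4/4" as two nodes of four cpus each, the savings work out as in the sketch below; the helper is hypothetical, but it follows the early_cpu_to_node() comparison used by the pcpu_lpage_cpu_distance() callback added in this patch:

#include <asm/topology.h>

/*
 * Worked example, assuming two nodes of four cpus and 2MB PMD pages:
 *
 *   before: every cpu gets a private PMD page
 *             8 cpus  * 2MB = 16MB of vmalloc space
 *   after:  cpus reporting LOCAL_DISTANCE to each other share a page
 *             (unit_size ends up around 512KB, four units per page)
 *             2 nodes * 2MB =  4MB of vmalloc space
 */
static int share_large_page(unsigned int a, unsigned int b)
{
	/* same node -> LOCAL_DISTANCE -> candidates for the same page */
	return early_cpu_to_node(a) == early_cpu_to_node(b);
}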
[ Impact: much better space efficiency on NUMA machines ] Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: Jan Beulich Cc: Andi Kleen Cc: David Miller --- arch/x86/kernel/setup_percpu.c | 72 ++++++++++++++++++++++++++++++++---------- 1 file changed, 55 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 4f2e0ac..7501bb1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -149,36 +149,73 @@ static void __init pcpul_map(void *ptr, size_t size, void *addr) set_pmd(pmd, pmd_v); } +static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to) +{ + if (early_cpu_to_node(from) == early_cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +} + static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) { size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; + size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE; + size_t unit_map_size, unit_size; + int *unit_map; + int nr_units; + ssize_t ret; + + /* on non-NUMA, embedding is better */ + if (!chosen && !pcpu_need_numa()) + return -EINVAL; + + /* need PSE */ + if (!cpu_has_pse) { + pr_warning("PERCPU: lpage allocator requires PSE\n"); + return -EINVAL; + } + /* allocate and build unit_map */ + unit_map_size = num_possible_cpus() * sizeof(int); + unit_map = alloc_bootmem_nopanic(unit_map_size); + if (!unit_map) { + pr_warning("PERCPU: failed to allocate unit_map\n"); + return -ENOMEM; + } + + ret = pcpu_lpage_build_unit_map(static_size, + PERCPU_FIRST_CHUNK_RESERVE, + &dyn_size, &unit_size, PMD_SIZE, + unit_map, pcpu_lpage_cpu_distance); + if (ret < 0) { + pr_warning("PERCPU: failed to build unit_map\n"); + goto out_free; + } + nr_units = ret; + + /* do the parameters look okay? 
*/ if (!chosen) { size_t vm_size = VMALLOC_END - VMALLOC_START; - size_t tot_size = num_possible_cpus() * PMD_SIZE; - - /* on non-NUMA, embedding is better */ - if (!pcpu_need_numa()) - return -EINVAL; + size_t tot_size = nr_units * unit_size; /* don't consume more than 20% of vmalloc area */ if (tot_size > vm_size / 5) { pr_info("PERCPU: too large chunk size %zuMB for " "large page remap\n", tot_size >> 20); - return -EINVAL; + ret = -EINVAL; + goto out_free; } } - /* need PSE */ - if (!cpu_has_pse) { - pr_warning("PERCPU: lpage allocator requires PSE\n"); - return -EINVAL; - } - - return pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, - reserve - PERCPU_FIRST_CHUNK_RESERVE, - PMD_SIZE, - pcpu_fc_alloc, pcpu_fc_free, pcpul_map); + ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, + dyn_size, unit_size, PMD_SIZE, + unit_map, nr_units, + pcpu_fc_alloc, pcpu_fc_free, pcpul_map); +out_free: + if (ret < 0) + free_bootmem(__pa(unit_map), unit_map_size); + return ret; } #else static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) @@ -299,7 +336,8 @@ void __init setup_per_cpu_areas(void) /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { - per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; + per_cpu_offset(cpu) = + delta + pcpu_unit_map[cpu] * pcpu_unit_size; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; setup_percpu_segment(cpu); -- cgit v1.1 From 79714acbab080ad351acf4bba9a2bbc21d65c93c Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:08:05 +0000 Subject: sh: hwblk base implementation This patch is the hwblk base implementation, containing structures and shared functions dealing with hardware blocks. A each processor model should provide a list of hwblks and describe which module stop bit that is associated with each hwblck and how the hwblks are grouped together into areas. The shared code keeps track of the usage count for each hwblk and the areas. Fallback implementations for processor specific code are also kept as weak symbols. The clock framework, the runtime pm code and cpuidle will all tie into this hwblk implementation. 
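From a driver's point of view the tie-in stays invisible; a hedged usage sketch follows (the device and clock names are made up, the actual glue is the sh_hwblk_clk_ops introduced below):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical driver: enabling an hwblk-backed clock clears the module
 * stop bit and bumps the hwblk and power-area usage counts; disabling it
 * drops them again, which is what later allows deeper sleep modes. */
static int example_power_up(struct device *dev)
{
	struct clk *clk = clk_get(dev, "sdhi0");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);
}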
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/hwblk.h | 61 +++++++++++++++++++++ arch/sh/kernel/cpu/Makefile | 2 +- arch/sh/kernel/cpu/hwblk.c | 130 ++++++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/time.c | 2 + 4 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/hwblk.h create mode 100644 arch/sh/kernel/cpu/hwblk.c (limited to 'arch') diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h new file mode 100644 index 0000000..51a46f4 --- /dev/null +++ b/arch/sh/include/asm/hwblk.h @@ -0,0 +1,61 @@ +#ifndef __ASM_SH_HWBLK_H +#define __ASM_SH_HWBLK_H + +#include +#include + +#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ + +#define HWBLK_AREA(_flags, _parent) \ +{ \ + .flags = _flags, \ + .parent = _parent, \ +} + +struct hwblk_area { + unsigned long cnt; + unsigned char parent; + unsigned char flags; +}; + +#define HWBLK(_mstp, _bit, _area) \ +{ \ + .mstp = (void __iomem *)_mstp, \ + .bit = _bit, \ + .area = _area, \ +} + +struct hwblk { + void __iomem *mstp; + unsigned char bit; + unsigned char area; + unsigned long cnt; +}; + +struct hwblk_info { + struct hwblk_area *areas; + int nr_areas; + struct hwblk *hwblks; + int nr_hwblks; +}; + +/* Should be defined by processor-specific code */ +int arch_hwblk_init(void); +int arch_hwblk_sleep_mode(void); + +int hwblk_register(struct hwblk_info *info); +int hwblk_init(void); + +/* allow clocks to enable and disable hardware blocks */ +#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \ +{ \ + .name = _name, \ + .id = _id, \ + .parent = _parent, \ + .arch_flags = _hwblk, \ + .flags = _flags, \ +} + +int sh_hwblk_clk_register(struct clk *clks, int nr); + +#endif /* __ASM_SH_HWBLK_H */ diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index eecad7c..3d6b931 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile @@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP) += ubc.o obj-$(CONFIG_SH_ADC) += adc.o obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o -obj-y += irq/ init.o clock.o +obj-y += irq/ init.o clock.o hwblk.o diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c new file mode 100644 index 0000000..7c3a73d --- /dev/null +++ b/arch/sh/kernel/cpu/hwblk.c @@ -0,0 +1,130 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(hwblk_lock); + +static void hwblk_area_inc(struct hwblk_info *info, int area) +{ + struct hwblk_area *hap = info->areas + area; + + hap->cnt++; + if (hap->cnt == 1) + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_inc(info, hap->parent); +} + +static void hwblk_area_dec(struct hwblk_info *info, int area) +{ + struct hwblk_area *hap = info->areas + area; + + if (hap->cnt == 1) + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_dec(info, hap->parent); + hap->cnt--; +} + +static void hwblk_enable(struct hwblk_info *info, int hwblk) +{ + struct hwblk *hp = info->hwblks + hwblk; + unsigned long tmp; + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + + hp->cnt++; + if (hp->cnt == 1) { + hwblk_area_inc(info, hp->area); + + tmp = __raw_readl(hp->mstp); + tmp &= ~(1 << hp->bit); + __raw_writel(tmp, hp->mstp); + } + + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +static void hwblk_disable(struct hwblk_info *info, int hwblk) +{ + struct hwblk *hp = info->hwblks + hwblk; + unsigned long tmp; + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + + if (hp->cnt == 1) { + hwblk_area_dec(info, 
hp->area); + + tmp = __raw_readl(hp->mstp); + tmp |= 1 << hp->bit; + __raw_writel(tmp, hp->mstp); + } + hp->cnt--; + + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +static struct hwblk_info *hwblk_info; + +int __init hwblk_register(struct hwblk_info *info) +{ + hwblk_info = info; + return 0; +} + +int __init __weak arch_hwblk_init(void) +{ + return 0; +} + +int __weak arch_hwblk_sleep_mode(void) +{ + return SUSP_SH_SLEEP; +} + +int __init hwblk_init(void) +{ + return arch_hwblk_init(); +} + +/* allow clocks to enable and disable hardware blocks */ +static int sh_hwblk_clk_enable(struct clk *clk) +{ + if (!hwblk_info) + return -ENOENT; + + hwblk_enable(hwblk_info, clk->arch_flags); + return 0; +} + +static void sh_hwblk_clk_disable(struct clk *clk) +{ + if (hwblk_info) + hwblk_disable(hwblk_info, clk->arch_flags); +} + +static struct clk_ops sh_hwblk_clk_ops = { + .enable = sh_hwblk_clk_enable, + .disable = sh_hwblk_clk_disable, + .recalc = followparent_recalc, +}; + +int __init sh_hwblk_clk_register(struct clk *clks, int nr) +{ + struct clk *clkp; + int ret = 0; + int k; + + for (k = 0; !ret && (k < nr); k++) { + clkp = clks + k; + clkp->ops = &sh_hwblk_clk_ops; + ret |= clk_register(clkp); + } + + return ret; +} diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 9b352a1..d2424b0 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -21,6 +21,7 @@ #include #include #include +#include #include /* Dummy RTC ops */ @@ -96,6 +97,7 @@ void __init time_init(void) if (board_time_init) board_time_init(); + hwblk_init(); clk_init(); rtc_sh_get_time(&xtime); -- cgit v1.1 From a61c1a636628a28ab5b42a9d36582a8f6a08893a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:15:25 +0000 Subject: sh: hwblk for sh7722 This patch contains the sh7722 specific hwblk implementation. Hwblk ids are added to the processor specific header file, module stop bits and areas are kept track of as hwblks, clocks are converted to make use of the shared hwblk code. Code to determine allowed sleep modes is also added. 
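A distilled view of the sleep-mode decision this adds (sketch only; the real arch_hwblk_sleep_mode() below reads the area counters directly): the deepest modes are allowed only while no block in the core area is in use, and bus-master blocks count against the core area as well because CORE_AREA_BM is declared with CORE_AREA as its parent.

#include <asm/suspend.h>

/* Illustrative only: the parameters stand in for the CORE_AREA and
 * CORE_AREA_BM usage counts kept by the shared hwblk code. */
static int example_allowed_mode(unsigned long core_cnt, unsigned long bm_cnt)
{
	if (!core_cnt)
		return SUSP_SH_STANDBY | SUSP_SH_SF;	/* everything idle */
	if (!bm_cnt)
		return SUSP_SH_SLEEP | SUSP_SH_SF;	/* no bus masters */
	return SUSP_SH_SLEEP;				/* safest mode */
}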
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7722.h | 14 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 60 ++++++++++--------- arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 106 +++++++++++++++++++++++++++++++++ 4 files changed, 153 insertions(+), 29 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c (limited to 'arch') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h index 738ea43..4856040 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h @@ -221,4 +221,18 @@ enum { GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_URAM, HWBLK_XYMEM, + HWBLK_INTC, HWBLK_DMAC, HWBLK_SHYWAY, HWBLK_HUDI, + HWBLK_UBC, HWBLK_TMU, HWBLK_CMT, HWBLK_RWDT, HWBLK_FLCTL, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SIO, + HWBLK_SIOF0, HWBLK_SIOF1, HWBLK_IIC, HWBLK_RTC, + HWBLK_TPU, HWBLK_IRDA, HWBLK_SDHI, HWBLK_SIM, HWBLK_KEYSC, + HWBLK_TSIF, HWBLK_USBF, HWBLK_2DG, HWBLK_SIU, HWBLK_VOU, + HWBLK_JPU, HWBLK_BEU, HWBLK_CEU, HWBLK_VEU, HWBLK_VPU, + HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7722_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index ebdd391..3cafda6 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -25,7 +25,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o -clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o +clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 40f8593..1fa9e1d 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7722 registers */ #define FRQCR 0xa4150000 @@ -140,35 +142,37 @@ struct clk div6_clks[] = { SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _flags) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags) +#define R_CLK &r_clk +#define P_CLK &div4_clks[DIV4_P] +#define B_CLK &div4_clks[DIV4_B] +#define U_CLK &div4_clks[DIV4_U] static struct clk mstp_clks[] = { - MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), - MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0), - MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0), - - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 8, 0), - - MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0), - MSTP("keysc0", &r_clk, MSTPCR2, 14, 0), - MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0), - MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0), - MSTP("jpu0", 
&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0), + SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + + SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0), + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -191,7 +195,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c new file mode 100644 index 0000000..00a1c02 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c @@ -0,0 +1,106 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c + * + * SH7722 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7722 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7722 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7722_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7722_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA), + [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + + [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA), + + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA), + [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA), + [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM), + [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA), + [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7722_hwblk_info = { + .areas = sh7722_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7722_hwblk_area), + .hwblks = sh7722_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7722_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7722_hwblk_area[CORE_AREA].cnt) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7722_hwblk_area[CORE_AREA_BM].cnt) + return SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7722_hwblk_info); +} -- cgit v1.1 From 7426394f20c2e74b7c560bcd266cec1b327a269b Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:28:00 +0000 Subject: sh: cpuidle for SuperH Mobile using hwblk This patch adds cpuidle support for SuperH Mobile. The sleep mode selected by cpuidle is compared with the mode selected by the hwblk sleep code and the best allowed mode is entered. At this point "Sleep mode" and "Sleep mode + SF" are supported. 
This code can easily be extended to support "Software suspend mode", but the assembly code must first be updated to avoid loosing interrupts. Also, update the code to only copy the assembly snippet into internal memory once at bootup. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 9 +++ arch/sh/kernel/cpu/shmobile/Makefile | 1 + arch/sh/kernel/cpu/shmobile/cpuidle.c | 102 ++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/shmobile/pm.c | 26 ++++----- 4 files changed, 125 insertions(+), 13 deletions(-) create mode 100644 arch/sh/kernel/cpu/shmobile/cpuidle.c (limited to 'arch') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index b1b9953..5c8ea28 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -10,6 +10,15 @@ struct swsusp_arch_regs { struct pt_regs user_regs; unsigned long bank1_regs[8]; }; + +void sh_mobile_call_standby(unsigned long mode); + +#ifdef CONFIG_CPU_IDLE +void sh_mobile_setup_cpuidle(void); +#else +static inline void sh_mobile_setup_cpuidle(void) {} +#endif + #endif /* flags passed to assembly suspend code */ diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile index 08bfa7c..e8a5111 100644 --- a/arch/sh/kernel/cpu/shmobile/Makefile +++ b/arch/sh/kernel/cpu/shmobile/Makefile @@ -4,3 +4,4 @@ # Power Management & Sleep mode obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_CPU_IDLE) += cpuidle.o diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c new file mode 100644 index 0000000..4afdd97 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -0,0 +1,102 @@ +/* + * arch/sh/kernel/cpu/shmobile/cpuidle.c + * + * Cpuidle support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned long cpuidle_mode[] = { + SUSP_SH_SLEEP, /* regular sleep mode */ + SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ +}; + +static int cpuidle_sleep_enter(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + unsigned long allowed_mode = arch_hwblk_sleep_mode(); + ktime_t before, after; + int requested_state = state - &dev->states[0]; + int allowed_state; + int k; + + /* convert allowed mode to allowed state */ + for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--) + if (cpuidle_mode[k] == allowed_mode) + break; + + allowed_state = k; + + /* take the following into account for sleep mode selection: + * - allowed_state: best mode allowed by hardware (clock deps) + * - requested_state: best mode allowed by software (latencies) + */ + k = min_t(int, allowed_state, requested_state); + + dev->last_state = &dev->states[k]; + before = ktime_get(); + sh_mobile_call_standby(cpuidle_mode[k]); + after = ktime_get(); + return ktime_to_ns(ktime_sub(after, before)) >> 10; +} + +static struct cpuidle_device cpuidle_dev; +static struct cpuidle_driver cpuidle_driver = { + .name = "sh_idle", + .owner = THIS_MODULE, +}; + +void sh_mobile_setup_cpuidle(void) +{ + struct cpuidle_device *dev = &cpuidle_dev; + struct cpuidle_state *state; + int i; + + cpuidle_register_driver(&cpuidle_driver); + + for (i = 0; i < CPUIDLE_STATE_MAX; i++) { + dev->states[i].name[0] = '\0'; + dev->states[i].desc[0] = '\0'; + } + + i = CPUIDLE_DRIVER_STATE_START; + + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C0"); + strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); + state->exit_latency = 1; + state->target_residency = 1 * 2; + state->power_usage = 3; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_SHALLOW; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + + dev->safe_state = state; + + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); + strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); + state->exit_latency = 100; + state->target_residency = 1 * 2; + state->power_usage = 1; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + + dev->state_count = i; + + cpuidle_register_device(dev); +} diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index 8c067adf..de078d2 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c + * arch/sh/kernel/cpu/shmobile/pm.c * * Power management support code for SuperH Mobile * @@ -32,20 +32,17 @@ * * R-standby mode is unsupported, but will be added in the future * U-standby mode is low priority since it needs bootloader hacks - * - * All modes should be tied in with cpuidle. But before that can - * happen we need to keep track of enabled hardware blocks so we - * can avoid entering sleep modes that stop clocks to hardware - * blocks that are in use even though the cpu core is idle. 
*/ +#define ILRAM_BASE 0xe5200000 + extern const unsigned char sh_mobile_standby[]; extern const unsigned int sh_mobile_standby_size; -static void sh_mobile_call_standby(unsigned long mode) +void sh_mobile_call_standby(unsigned long mode) { extern void *vbr_base; - void *onchip_mem = (void *)0xe5200000; /* ILRAM */ + void *onchip_mem = (void *)ILRAM_BASE; void (*standby_onchip_mem)(unsigned long) = onchip_mem; /* Note: Wake up from sleep may generate exceptions! @@ -55,11 +52,6 @@ static void sh_mobile_call_standby(unsigned long mode) if (mode & SUSP_SH_SF) asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); - /* Copy the assembly snippet to the otherwise ununsed ILRAM */ - memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); - wmb(); - ctrl_barrier(); - /* Let assembly snippet in on-chip memory handle the rest */ standby_onchip_mem(mode); @@ -85,7 +77,15 @@ static struct platform_suspend_ops sh_pm_ops = { static int __init sh_pm_init(void) { + void *onchip_mem = (void *)ILRAM_BASE; + + /* Copy the assembly snippet to the otherwise ununsed ILRAM */ + memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); + wmb(); + ctrl_barrier(); + suspend_set_ops(&sh_pm_ops); + sh_mobile_setup_cpuidle(); return 0; } -- cgit v1.1 From 5084f61a4d6c9c7bfd3be07fbb5253c1a08cd568 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 3 Jul 2009 23:34:51 +0100 Subject: sh: Use bootmem ontop of lmb for NUMA Like the UP case, use lmb as the foundation of memory resource management on NUMA. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/numa.c | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 095d93b..9b784fd 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -9,6 +9,7 @@ */ #include #include +#include #include #include #include @@ -26,6 +27,15 @@ EXPORT_SYMBOL_GPL(node_data); void __init setup_memory(void) { unsigned long free_pfn = PFN_UP(__pa(_end)); + u64 base = min_low_pfn << PAGE_SHIFT; + u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn; + + lmb_add(base, size); + + /* Reserve the LMB regions used by the kernel, initrd, etc.. */ + lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) - + (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); /* * Node 0 sets up its pgdat at the first available pfn, @@ -45,24 +55,23 @@ void __init setup_memory(void) void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) { - unsigned long bootmap_pages, bootmap_start, bootmap_size; - unsigned long start_pfn, free_pfn, end_pfn; + unsigned long bootmap_pages; + unsigned long start_pfn, end_pfn; + unsigned long bootmem_paddr; /* Don't allow bogus node assignment */ BUG_ON(nid > MAX_NUMNODES || nid == 0); - /* - * The free pfn starts at the beginning of the range, and is - * advanced as necessary for pgdat and node map allocations. 
- */ - free_pfn = start_pfn = start >> PAGE_SHIFT; + start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; + lmb_add(start, end - start); + __add_active_range(nid, start_pfn, end_pfn); /* Node-local pgdat */ - NODE_DATA(nid) = pfn_to_kaddr(free_pfn); - free_pfn += PFN_UP(sizeof(struct pglist_data)); + NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), + SMP_CACHE_BYTES, end_pfn)); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; @@ -71,16 +80,17 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) /* Node-local bootmap */ bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn); - bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn, - end_pfn); + bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, + PAGE_SIZE, end_pfn); + init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, + start_pfn, end_pfn); free_bootmem_with_active_regions(nid, end_pfn); /* Reserve the pgdat and bootmap space with the bootmem allocator */ reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, sizeof(struct pglist_data), BOOTMEM_DEFAULT); - reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT, + reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); /* It's up */ -- cgit v1.1 From c63c3105e4991b2991ba73a742b8b59bfdbe4acd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 5 Jul 2009 02:50:10 +0900 Subject: sh: use kprobes_built_in() for notify_page_fault(). Kill off the KPROBES ifdef, as per x86. Signed-off-by: Paul Mundt --- arch/sh/mm/fault_32.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 7192594..ce75b88 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -25,14 +25,12 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; -#ifdef CONFIG_KPROBES - if (!user_mode(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; preempt_enable(); } -#endif return ret; } -- cgit v1.1 From 0f60bb25b4036d30fd795709be09626c58c52464 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 5 Jul 2009 03:18:47 +0900 Subject: sh: Tidy up vmalloc fault handling. This rewrites the vmalloc fault handling as per x86, which subsequently allows for easy future tie-in for vmalloc_sync_all(). Signed-off-by: Paul Mundt --- arch/sh/mm/fault_32.c | 153 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 97 insertions(+), 56 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index ce75b88..08d0117 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -2,7 +2,7 @@ * Page fault handler for SH with an MMU. 
* * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2003 - 2008 Paul Mundt + * Copyright (C) 2003 - 2009 Paul Mundt * * Based on linux/arch/i386/mm/fault.c: * Copyright (C) 1995 Linus Torvalds @@ -35,6 +35,74 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) return ret; } +static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) +{ + unsigned index = pgd_index(address); + pgd_t *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + + pgd += index; + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) + return NULL; + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + + if (!pmd_present(*pmd)) + set_pmd(pmd, *pmd_k); + else + BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + + return pmd_k; +} + +/* + * Handle a fault on the vmalloc or module mapping area + */ +static noinline int vmalloc_fault(unsigned long address) +{ + pgd_t *pgd_k; + pmd_t *pmd_k; + pte_t *pte_k; + + /* Make sure we are in vmalloc area: */ + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "current" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + pgd_k = get_TTB(); + pmd_k = vmalloc_sync_one(__va((unsigned long)pgd_k), address); + if (!pmd_k) + return -1; + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + return -1; + + return 0; +} + +static int fault_in_kernel_space(unsigned long address) +{ + return address >= TASK_SIZE; +} + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -44,6 +112,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { + unsigned long vec; struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma; @@ -51,59 +120,30 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, int fault; siginfo_t info; - /* - * We don't bother with any notifier callbacks here, as they are - * all handled through the __do_page_fault() fast-path. - */ - tsk = current; + mm = tsk->mm; si_code = SEGV_MAPERR; + vec = lookup_exception_vector(); - if (unlikely(address >= TASK_SIZE)) { - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "tsk" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - int offset = pgd_index(address); - pgd_t *pgd, *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - - pgd = get_TTB() + offset; - pgd_k = swapper_pg_dir + offset; - - if (!pgd_present(*pgd)) { - if (!pgd_present(*pgd_k)) - goto bad_area_nosemaphore; - set_pgd(pgd, *pgd_k); + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. 
+ */ + if (unlikely(fault_in_kernel_space(address))) { + if (vmalloc_fault(address) >= 0) return; - } - - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); - - if (!pud_present(*pud)) { - if (!pud_present(*pud_k)) - goto bad_area_nosemaphore; - set_pud(pud, *pud_k); + if (notify_page_fault(regs, vec)) return; - } - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (pmd_present(*pmd) || !pmd_present(*pmd_k)) - goto bad_area_nosemaphore; - set_pmd(pmd, *pmd_k); - return; + goto bad_area_nosemaphore; } - mm = tsk->mm; - - if (unlikely(notify_page_fault(regs, lookup_exception_vector()))) + if (unlikely(notify_page_fault(regs, vec))) return; /* Only enable interrupts if they were on before the fault */ @@ -113,8 +153,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); /* - * If we're in an interrupt or have no user - * context, we must not take the fault.. + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: */ if (in_atomic() || !mm) goto no_context; @@ -130,10 +170,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, goto bad_area; if (expand_stack(vma, address)) goto bad_area; -/* - * Ok, we have a good vm_area for this memory access, so - * we can handle it.. - */ + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ good_area: si_code = SEGV_ACCERR; if (writeaccess) { @@ -171,10 +212,10 @@ survive: up_read(&mm->mmap_sem); return; -/* - * Something tried to access memory that isn't in our memory map.. - * Fix it, but check if it's kernel or user first.. - */ + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ bad_area: up_read(&mm->mmap_sem); -- cgit v1.1 From ca0d17277fd101ce4878f92b398b6ab71fb2c287 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 28 Jun 2009 12:53:07 +0100 Subject: sh: Fix the value of MCOUNT_INSN_OFFSET It seems that MCOUNT_INSN_OFFSET was calculating the distance between the wrong functions. The value that should have actually been computed is the distance between ftrace_call and ftrace_stub. I discovered this when I added some code to ftrace_caller. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 8fea7d8..b09311a 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -11,10 +11,10 @@ extern void mcount(void); #define MCOUNT_ADDR ((long)(mcount)) #ifdef CONFIG_DYNAMIC_FTRACE -#define CALLER_ADDR ((long)(ftrace_caller)) +#define CALL_ADDR ((long)(ftrace_call)) #define STUB_ADDR ((long)(ftrace_stub)) -#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) +#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4) struct dyn_arch_ftrace { /* No extra data needed on sh */ -- cgit v1.1 From c1340c053be7a43d837a3acb352d5008be865a55 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 28 Jun 2009 14:05:44 +0100 Subject: sh: Define HAVE_FUNCTION_TRACE_MCOUNT_TEST Enable HAVE_FUNCTION_TRACE_MCOUNT_TEST and test the value of function_trace_stop from our assembly code as opposed to using the generic C function. This should optimise our mcount/ftrace code path. 
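In C terms the assembly added below performs roughly the following early-out before any registers are saved; this is an illustrative rendering only, with function_trace_stop being the existing ftrace core variable:

#include <linux/ftrace.h>

/* Rough C equivalent of the test now done directly in mcount.S: when
 * tracing is stopped, mcount branches straight to ftrace_stub instead
 * of going through a generic C wrapper on every call. */
static inline int mcount_may_trace(void)
{
	return !function_trace_stop;
}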
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/lib/mcount.S | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e2bdd7b..801a4a7 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -31,6 +31,7 @@ config SUPERH32 select HAVE_FUNCTION_TRACER select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_ARCH_KGDB select ARCH_HIBERNATION_POSSIBLE if MMU diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 110fbfe..cb87ef5 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -2,7 +2,7 @@ * arch/sh/lib/mcount.S * * Copyright (C) 2008 Paul Mundt - * Copyright (C) 2008 Matt Fleming + * Copyright (C) 2008, 2009 Matt Fleming * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -35,6 +35,12 @@ .type mcount,@function _mcount: mcount: +#ifndef CONFIG_DYNAMIC_FTRACE + mov.l .Lfunction_trace_stop, r0 + mov.l @r0, r0 + tst r0, r0 + bf ftrace_stub +#endif MCOUNT_ENTER() #ifdef CONFIG_DYNAMIC_FTRACE @@ -62,6 +68,11 @@ skip_trace: #ifdef CONFIG_DYNAMIC_FTRACE .globl ftrace_caller ftrace_caller: + mov.l .Lfunction_trace_stop, r0 + mov.l @r0, r0 + tst r0, r0 + bf ftrace_stub + MCOUNT_ENTER() .globl ftrace_call @@ -88,3 +99,7 @@ ftrace_call: ftrace_stub: rts nop + + .align 2 +.Lfunction_trace_stop: + .long function_trace_stop -- cgit v1.1 From c652d780c9cf7f860141de232b37160fe013feca Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 6 Jul 2009 20:16:33 +0900 Subject: sh: Add ftrace syscall tracing support Now that I've added TIF_SYSCALL_FTRACE the thread flags do not fit into a single byte any more. Code testing them now needs to be aware of the upper and lower bytes. 
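The underlying constraint is the instruction set: SH's "tst #imm, r0" only takes an 8-bit immediate, so a work mask that now includes bit 8 has to be tested in two halves. The entry-code changes below amount to the following illustrative C, with the mask macros coming from <asm/thread_info.h>:

#include <asm/thread_info.h>

/* Sketch of the two-byte test: the low byte is checked with one
 * immediate, the high byte (which now carries _TIF_SYSCALL_FTRACE,
 * bit 8) separately, mirroring the entry-common.S changes. */
static inline int has_syscall_work(unsigned long flags)
{
	if (flags & (_TIF_WORK_SYSCALL_MASK & 0xff))
		return 1;
	return !!((flags >> 8) & (_TIF_WORK_SYSCALL_MASK >> 8));
}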
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/syscall_32.h | 1 + arch/sh/include/asm/thread_info.h | 11 ++++--- arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/entry-common.S | 18 ++++++++--- arch/sh/kernel/ftrace.c | 68 +++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/ptrace_32.c | 8 +++++ arch/sh/lib/mcount.S | 2 +- 8 files changed, 100 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 801a4a7..29e41ec 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -32,6 +32,7 @@ config SUPERH32 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_FTRACE_SYSCALLS select HAVE_ARCH_KGDB select ARCH_HIBERNATION_POSSIBLE if MMU diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6f83f2c..7d80df4 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -65,6 +65,7 @@ static inline void syscall_get_arguments(struct task_struct *task, case 3: args[2] = regs->regs[6]; case 2: args[1] = regs->regs[5]; case 1: args[0] = regs->regs[4]; + case 0: break; default: BUG(); diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index f09ac48..499e315 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) extern struct thread_info *alloc_thread_info(struct task_struct *tsk); extern void free_thread_info(struct thread_info *ti); - + #endif /* THREAD_SHIFT < PAGE_SHIFT */ #endif /* __ASSEMBLY__ */ @@ -116,6 +116,7 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ +#define TIF_SYSCALL_FTRACE 8 /* for ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -129,25 +130,27 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) #define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) /* - * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within a byte, or we + * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we * blow the tst immediate size constraints and need to fix up * arch/sh/kernel/entry-common.S. 
*/ /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) + _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_FTRACE) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_SYSCALL_FTRACE) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 9411e3e..fee924a 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -29,6 +29,7 @@ obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d62359c..d621756 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -131,7 +131,7 @@ ENTRY(resume_userspace) nop #endif mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 + tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all tst #_TIF_NEED_RESCHED, r0 @@ -163,7 +163,7 @@ work_resched: #endif ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 + tst #(_TIF_WORK_MASK & 0xff), r0 bt __restore_all bra work_pending tst #_TIF_NEED_RESCHED, r0 @@ -181,7 +181,7 @@ work_resched: syscall_exit_work: ! r0: current_thread_info->flags ! r8: current_thread_info - tst #_TIF_WORK_SYSCALL_MASK, r0 + tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 #ifdef CONFIG_TRACE_IRQFLAGS @@ -331,8 +331,12 @@ ENTRY(system_call) ! get_current_thread_info r8, r10 mov.l @(TI_FLAGS,r8), r8 - mov #_TIF_WORK_SYSCALL_MASK, r10 + mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10 + mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9 tst r10, r8 + shll8 r9 + bf syscall_trace_entry + tst r9, r8 bf syscall_trace_entry ! mov.l 2f, r8 ! Number of syscalls @@ -359,7 +363,11 @@ syscall_exit: ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags - tst #_TIF_ALLWORK_MASK, r0 + tst #(_TIF_ALLWORK_MASK & 0xff), r0 + mov #(_TIF_ALLWORK_MASK >> 8), r1 + bf syscall_exit_work + shlr8 r0 + tst r0, r1 bf syscall_exit_work bra __restore_all nop diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 066f37d..4f62ece 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; @@ -131,3 +133,69 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } + +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; +extern unsigned long *sys_call_table; + +static struct syscall_metadata **syscalls_metadata; + +static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + if (start->name && !strcmp(start->name, str)) + return start; + } + + return NULL; +} + +#define FTRACE_SYSCALL_MAX (NR_syscalls - 1) + +struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +void arch_init_ftrace_syscalls(void) +{ + int i; + struct syscall_metadata *meta; + unsigned long **psys_syscall_table = &sys_call_table; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto end; + + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * + FTRACE_SYSCALL_MAX, GFP_KERNEL); + if (!syscalls_metadata) { + WARN_ON(1); + return; + } + + for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { + meta = find_syscall_meta(psys_syscall_table[i]); + syscalls_metadata[i] = meta; + } + return; + + /* Paranoid: avoid overflow */ +end: + atomic_dec(&refs); +} +#endif /* CONFIG_FTRACE_SYSCALLS */ diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 3392e83..c198ece 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -34,6 +34,8 @@ #include #include +#include + /* * This routine will get a word off of the process kernel stack. 
*/ @@ -459,6 +461,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1L; + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_enter(regs); + if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[3], regs->regs[4], regs->regs[5], @@ -475,6 +480,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), regs->regs[0]); + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_exit(regs); + step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index cb87ef5..71e87f9 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -72,7 +72,7 @@ ftrace_caller: mov.l @r0, r0 tst r0, r0 bf ftrace_stub - + MCOUNT_ENTER() .globl ftrace_call -- cgit v1.1 From 1dcdd0911b5553f0282ce8525773955b59a56919 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 9 Jul 2009 11:27:40 +0900 Subject: microblaze: include EXIT_TEXT to _stext Microblaze wants to throw out EXIT_TEXT during runtime too. This hasn't caused trouble till now because the linker script didn't discard EXIT_TEXT and it ended up in its default output section. As discard definition is about to be unified, include EXIT_TEXT into _stext explicitly and while at it replace explicit exitcall definition to EXIT_CALL. Signed-off-by: Michal Simek Signed-off-by: Tejun Heo --- arch/microblaze/kernel/vmlinux.lds.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index a207543..81bebdc 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -23,8 +23,8 @@ SECTIONS { _stext = . ; *(.text .text.*) *(.fixup) - - *(.exitcall.exit) + EXIT_TEXT + EXIT_CALL SCHED_TEXT LOCK_TEXT KPROBES_TEXT -- cgit v1.1 From 023bf6f1b8bf58dc4da7f0dc1cf4787b0d5297c1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 9 Jul 2009 11:27:40 +0900 Subject: linker script: unify usage of discard definition Discarded sections in different archs share some commonality but have considerable differences. This led to linker script for each arch implementing its own /DISCARD/ definition, which makes maintaining tedious and adding new entries error-prone. This patch makes all linker scripts to move discard definitions to the end of the linker script and use the common DISCARDS macro. As ld uses the first matching section definition, archs can include default discarded sections by including them earlier in the linker script. ia64 is notable because it first throws away some ia64 specific subsections and then include the rest of the sections into the final image, so those sections must be discarded before the inclusion. defconfig compile tested for x86, x86-64, powerpc, powerpc64, ia64, alpha, sparc, sparc64 and s390. Michal Simek tested microblaze. 
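The per-arch pattern this series converges on is sketched below. The sketch is not part of the patch; the ".arch.junk.*" section names are placeholders, and the generic DISCARDS macro (from include/asm-generic/vmlinux.lds.h) is assumed to cover the EXIT_TEXT, EXIT_DATA, *(.exitcall.exit) and *(.discard) entries that the per-arch /DISCARD/ definitions used to list by hand:

    SECTIONS
    {
        /* ia64-style: subsections that must never reach the image are
         * discarded *before* any later section definition can match them */
        /DISCARD/ : { *(.arch.junk.early) }

        /* ... the usual text, data and init output sections ... */

        STABS_DEBUG
        DWARF_DEBUG

        /* generic discards go at the very end of the script */
        DISCARDS
        /* arch-only junk can still be dropped alongside them, as mips does
         * for .pdr/.reginfo and x86 does for .eh_frame */
        /DISCARD/ : { *(.arch.junk.late) }
    }

Because ld honours the first matching section definition, anything an arch places into an output section before DISCARDS is kept, and only the leftovers fall through to the trailing /DISCARD/ rules.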
Signed-off-by: Tejun Heo Acked-by: Paul Mundt Acked-by: Mike Frysinger Tested-by: Michal Simek Cc: linux-arch@vger.kernel.org Cc: Michal Simek Cc: microblaze-uclinux@itee.uq.edu.au Cc: Sam Ravnborg Cc: Tony Luck --- arch/alpha/kernel/vmlinux.lds.S | 10 ++-------- arch/avr32/kernel/vmlinux.lds.S | 10 +++------- arch/blackfin/kernel/vmlinux.lds.S | 6 +----- arch/cris/kernel/vmlinux.lds.S | 10 ++-------- arch/frv/kernel/vmlinux.lds.S | 2 +- arch/h8300/kernel/vmlinux.lds.S | 6 ++---- arch/ia64/kernel/vmlinux.lds.S | 17 ++++++++--------- arch/m32r/kernel/vmlinux.lds.S | 11 +++-------- arch/m68k/kernel/vmlinux-std.lds | 11 +++-------- arch/m68k/kernel/vmlinux-sun3.lds | 10 ++-------- arch/m68knommu/kernel/vmlinux.lds.S | 8 +------- arch/microblaze/kernel/vmlinux.lds.S | 2 +- arch/mips/kernel/vmlinux.lds.S | 22 ++++++++++------------ arch/mn10300/kernel/vmlinux.lds.S | 9 +++------ arch/parisc/kernel/vmlinux.lds.S | 9 ++++----- arch/powerpc/kernel/vmlinux.lds.S | 10 +++------- arch/s390/kernel/vmlinux.lds.S | 10 +++------- arch/sh/kernel/vmlinux.lds.S | 11 ++++------- arch/sparc/kernel/vmlinux.lds.S | 9 ++------- arch/um/include/asm/common.lds.S | 5 ----- arch/um/kernel/dyn.lds.S | 2 +- arch/um/kernel/uml.lds.S | 2 +- arch/x86/kernel/vmlinux.lds.S | 11 ++++------- arch/xtensa/kernel/vmlinux.lds.S | 14 ++++---------- 24 files changed, 68 insertions(+), 149 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 75fe1d6..6dc03c3 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -134,14 +134,6 @@ SECTIONS __bss_stop = .; _end = .; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - .mdebug 0 : { *(.mdebug) } @@ -151,4 +143,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + + DISCARDS } diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index b832460..c4b5665 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -124,15 +124,11 @@ SECTIONS _end = .; } + DWARF_DEBUG + /* When something in the kernel is NOT compiled as a module, the module * cleanup code and data are put into these segments. Both can then be * thrown away, as cleanup code is never called unless it's a module. 
*/ - /DISCARD/ : { - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - - DWARF_DEBUG + DISCARDS } diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 6e8eabd..d7ffe29 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -277,9 +277,5 @@ SECTIONS DWARF_DEBUG - /DISCARD/ : - { - *(.exitcall.exit) - *(.discard) - } + DISCARDS } diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index a3175eb..6c81836 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -140,13 +140,7 @@ SECTIONS _end = .; __end = .; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024; + + DISCARDS } diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index 64b5a5e..7dbf41f 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -178,7 +178,7 @@ SECTIONS .comment 0 : { *(.comment) } - /DISCARD/ : { *(.discard) } + DISCARDS } __kernel_image_size_no_bss = __bss_start - __kernel_image_start; diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 03d6c0d..662b02e 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -152,10 +152,6 @@ SECTIONS __end = . ; __ramstart = .; } - /DISCARD/ : { - *(.exitcall.exit) - *(.discard) - } .romfs : { *(.romfs*) @@ -166,4 +162,6 @@ SECTIONS COMMAND_START = . - 0x200 ; __ramend = . ; } + + DISCARDS } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 13d9589..eb4214d 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -24,15 +24,14 @@ PHDRS { } SECTIONS { - /* Sections to be discarded */ + /* unwind exit sections must be discarded before the rest of the + sections get included. */ /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) - } + *(.comment) + *(.note) + } v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */ phys_start = _start - LOAD_OFFSET; @@ -317,7 +316,7 @@ SECTIONS .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } - /* These must appear regardless of . */ - /DISCARD/ : { *(.comment) } - /DISCARD/ : { *(.note) } + + /* Default discards */ + DISCARDS } diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 480a499..de5e21c 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -120,14 +120,6 @@ SECTIONS _end = . ; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - /* Stabs debugging sections. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } @@ -136,4 +128,7 @@ SECTIONS .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } + + /* Sections to be discarded */ + DISCARDS } diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 905a797..47eac19 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -82,14 +82,6 @@ SECTIONS _end = . ; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - /* Stabs debugging sections. 
*/ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } @@ -98,4 +90,7 @@ SECTIONS .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } + + /* Sections to be discarded */ + DISCARDS } diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 47d04be..03efaf0 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -77,14 +77,6 @@ __init_begin = .; _end = . ; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - .crap : { /* Stabs debugging sections. */ *(.stab) @@ -97,4 +89,6 @@ __init_begin = .; *(.note) } + /* Sections to be discarded */ + DISCARDS } diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index 68111a6..2736a5e 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -184,13 +184,6 @@ SECTIONS { __init_end = .; } > INIT - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - .bss : { . = ALIGN(4); _sbss = . ; @@ -201,5 +194,6 @@ SECTIONS { _end = . ; } > BSS + DISCARDS } diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 81bebdc..ec5fa91 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -163,5 +163,5 @@ SECTIONS { . = ALIGN(4096); _end = .; - /DISCARD/ : { *(.discard) } + DISCARDS } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 4590160..1474c18 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -176,18 +176,6 @@ SECTIONS _end = . ; - /* Sections to be discarded */ - /DISCARD/ : { - *(.exitcall.exit) - *(.discard) - - /* ABI crap starts here */ - *(.MIPS.options) - *(.options) - *(.pdr) - *(.reginfo) - } - /* These mark the ABI of the kernel for debuggers. */ .mdebug.abi32 : { KEEP(*(.mdebug.abi32)) @@ -213,4 +201,14 @@ SECTIONS *(.gptab.bss) *(.gptab.sbss) } + + /* Sections to be discarded */ + DISCARDS + /DISCARD/ : { + /* ABI crap starts here */ + *(.MIPS.options) + *(.options) + *(.pdr) + *(.reginfo) + } } diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index 5609d49..8fcd0f1 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -115,13 +115,10 @@ SECTIONS . = ALIGN(PAGE_SIZE); pg0 = .; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_CALL - *(.discard) - } - STABS_DEBUG DWARF_DEBUG + + /* Sections to be discarded */ + DISCARDS } diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index ccf5834..aea1784 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -237,10 +237,12 @@ SECTIONS /* freed after init ends here */ _end = . ; + STABS_DEBUG + .note 0 : { *(.note) } + /* Sections to be discarded */ + DISCARDS /DISCARD/ : { - *(.exitcall.exit) - *(.discard) #ifdef CONFIG_64BIT /* temporary hack until binutils is fixed to not emit these * for static binaries @@ -253,7 +255,4 @@ SECTIONS *(.gnu.hash) #endif } - - STABS_DEBUG - .note 0 : { *(.note) } } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 7fca935..244e365 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -37,13 +37,6 @@ jiffies = jiffies_64 + 4; #endif SECTIONS { - /* Sections to be discarded. */ - /DISCARD/ : { - *(.exitcall.exit) - *(.discard) - EXIT_DATA - } - . 
= KERNELBASE; /* @@ -299,4 +292,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); _end = . ; PROVIDE32 (end = .); + + /* Sections to be discarded. */ + DISCARDS } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 98867df..82415c7 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -157,14 +157,10 @@ SECTIONS _end = . ; - /* Sections to be discarded */ - /DISCARD/ : { - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - /* Debugging sections. */ STABS_DEBUG DWARF_DEBUG + + /* Sections to be discarded */ + DISCARDS } diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 766976d..0ce254b 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -163,17 +163,14 @@ SECTIONS _end = . ; } + STABS_DEBUG + DWARF_DEBUG + /* * When something in the kernel is NOT compiled as a module, the * module cleanup code and data are put into these segments. Both * can then be thrown away, as cleanup code is never called unless * it's a module. */ - /DISCARD/ : { - *(.exitcall.exit) - *(.discard) - } - - STABS_DEBUG - DWARF_DEBUG + DISCARDS } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index d63cf91..866390f 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -171,13 +171,8 @@ SECTIONS } _end = . ; - /DISCARD/ : { - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - STABS_DEBUG DWARF_DEBUG + + DISCARDS } diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index cb02486..37ecc55 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -123,8 +123,3 @@ __initramfs_end = .; } - /* Sections to be discarded */ - /DISCARD/ : { - *(.exitcall.exit) - } - diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 2916d6e..715a188 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -157,5 +157,5 @@ SECTIONS DWARF_DEBUG - /DISCARD/ : { *(.discard) } + DISCARDS } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 1f8a622..2ebd397 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -101,5 +101,5 @@ SECTIONS DWARF_DEBUG - /DISCARD/ : { *(.discard) } + DISCARDS } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 367e878..b600c84 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -387,15 +387,12 @@ SECTIONS _end = .; } - /* Sections to be discarded */ - /DISCARD/ : { - *(.exitcall.exit) - *(.eh_frame) - *(.discard) - } - STABS_DEBUG DWARF_DEBUG + + /* Sections to be discarded */ + DISCARDS + /DISCARD/ : { *(.eh_frame) } } diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index b1e2463..921b6ff 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -280,16 +280,6 @@ SECTIONS *(.ResetVector.text) } - /* Sections to be discarded */ - /DISCARD/ : - { - *(.exit.literal) - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) - *(.discard) - } - .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } @@ -322,4 +312,8 @@ SECTIONS *(.xt.lit) *(.gnu.linkonce.p*) } + + /* Sections to be discarded */ + DISCARDS + /DISCARD/ : { *(.exit.literal) } } -- cgit v1.1 From c31d96338a6041520ba5f1b6a4a5012ef00686b3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:37 +0200 Subject: x86: mce: Make CONFIG_X86_ANCIENT_MCE dependent on CONFIG_X86_MCE Add a missing depency for ANCIENT_MCE. 
It didn't matter in practice because the ANCIENT code wasn't compiled without X86_MCE, but it's better to express that clearly in Kconfig. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 356d2ec..5962b87 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -823,7 +823,7 @@ config X86_MCE_AMD config X86_ANCIENT_MCE def_bool n - depends on X86_32 + depends on X86_32 && X86_MCE prompt "Support for old Pentium 5 / WinChip machine checks" ---help--- Include support for machine check handling on old Pentium 5 or WinChip -- cgit v1.1 From bab9bc6583fe6c1660d6ed36dd14bbb4edfaf393 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:38 +0200 Subject: x86: mce: Update X86_MCE description in x86/Kconfig - Clarify that this config controls thermal throttling reporting too - Clarify the types of errors reported by machine checks - Drop references to ancient CPUs. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5962b87..134a8c0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -774,20 +774,12 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS increased on these systems. config X86_MCE - bool "Machine Check Exception" + bool "Machine Check / overheating reporting" ---help--- - Machine Check Exception support allows the processor to notify the - kernel if it detects a problem (e.g. overheating, component failure). + Machine Check support allows the processor to notify the + kernel if it detects a problem (e.g. overheating, data corruption). The action the kernel takes depends on the severity of the problem, - ranging from a warning message on the console, to halting the machine. - Your processor must be a Pentium or newer to support this - check the - flags in /proc/cpuinfo for mce. Note that some older Pentium systems - have a design flaw which leads to false MCE events - hence MCE is - disabled on all P5 processors, unless explicitly enabled with "mce" - as a boot argument. Similarly, if MCE is built in and creates a - problem on some new non-standard machine, you can boot with "nomce" - to disable it. MCE support simply ignores non-MCE processors like - the 386 and 486, so nearly everyone can say Y here. + ranging from warning messages to halting the machine. config X86_OLD_MCE depends on X86_32 && X86_MCE -- cgit v1.1 From 5bb38adcb54cf7192b154368ad62982caa11ca0b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:39 +0200 Subject: x86: mce: Remove old i386 machine check code As announced in feature-remove-schedule.txt remove CONFIG_X86_OLD_MCE This patch only removes code. The ancient machine check code for very old systems that are not supported by CONFIG_X86_NEW_MCE is still kept. Signed-off-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/Kconfig | 35 +------ arch/x86/include/asm/mce.h | 11 --- arch/x86/kernel/cpu/mcheck/Makefile | 2 - arch/x86/kernel/cpu/mcheck/k7.c | 116 ----------------------- arch/x86/kernel/cpu/mcheck/mce.c | 47 ---------- arch/x86/kernel/cpu/mcheck/non-fatal.c | 94 ------------------- arch/x86/kernel/cpu/mcheck/p4.c | 163 --------------------------------- arch/x86/kernel/cpu/mcheck/p6.c | 127 ------------------------- 8 files changed, 2 insertions(+), 593 deletions(-) delete mode 100644 arch/x86/kernel/cpu/mcheck/k7.c delete mode 100644 arch/x86/kernel/cpu/mcheck/non-fatal.c delete mode 100644 arch/x86/kernel/cpu/mcheck/p4.c delete mode 100644 arch/x86/kernel/cpu/mcheck/p6.c (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 134a8c0..d986769 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -781,21 +781,10 @@ config X86_MCE The action the kernel takes depends on the severity of the problem, ranging from warning messages to halting the machine. -config X86_OLD_MCE - depends on X86_32 && X86_MCE - bool "Use legacy machine check code (will go away)" - default n - select X86_ANCIENT_MCE - ---help--- - Use the old i386 machine check code. This is merely intended for - testing in a transition period. Try this if you run into any machine - check related software problems, but report the problem to - linux-kernel. When in doubt say no. - config X86_NEW_MCE depends on X86_MCE bool - default y if (!X86_OLD_MCE && X86_32) || X86_64 + default y config X86_MCE_INTEL def_bool y @@ -835,29 +824,9 @@ config X86_MCE_INJECT If you don't know what a machine check is and you don't do kernel QA it is safe to say n. -config X86_MCE_NONFATAL - tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" - depends on X86_OLD_MCE - ---help--- - Enabling this feature starts a timer that triggers every 5 seconds which - will look at the machine check registers to see if anything happened. - Non-fatal problems automatically get corrected (but still logged). - Disable this if you don't want to see these messages. - Seeing the messages this option prints out may be indicative of dying - or out-of-spec (ie, overclocked) hardware. - This option only does something on certain CPUs. - (AMD Athlon/Duron and Intel Pentium 4) - -config X86_MCE_P4THERMAL - bool "check for P4 thermal throttling interrupt." - depends on X86_OLD_MCE && X86_MCE && (X86_UP_APIC || SMP) - ---help--- - Enabling this feature will cause a message to be printed when the P4 - enters thermal throttling. 
- config X86_THERMAL_VECTOR def_bool y - depends on X86_MCE_P4THERMAL || X86_MCE_INTEL + depends on X86_MCE_INTEL config VM86 bool "Enable VM86 support" if EMBEDDED diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index b50b9e9..6b8a974 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -115,13 +115,6 @@ void mcheck_init(struct cpuinfo_x86 *c); static inline void mcheck_init(struct cpuinfo_x86 *c) {} #endif -#ifdef CONFIG_X86_OLD_MCE -extern int nr_mce_banks; -void amd_mcheck_init(struct cpuinfo_x86 *c); -void intel_p4_mcheck_init(struct cpuinfo_x86 *c); -void intel_p6_mcheck_init(struct cpuinfo_x86 *c); -#endif - #ifdef CONFIG_X86_ANCIENT_MCE void intel_p5_mcheck_init(struct cpuinfo_x86 *c); void winchip_mcheck_init(struct cpuinfo_x86 *c); @@ -208,11 +201,7 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); void intel_init_thermal(struct cpuinfo_x86 *c); -#ifdef CONFIG_X86_NEW_MCE void mce_log_therm_throt_event(__u64 status); -#else -static inline void mce_log_therm_throt_event(__u64 status) {} -#endif #endif /* __KERNEL__ */ #endif /* _ASM_X86_MCE_H */ diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index 188a1ca..022a036 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -1,11 +1,9 @@ obj-y = mce.o obj-$(CONFIG_X86_NEW_MCE) += mce-severity.o -obj-$(CONFIG_X86_OLD_MCE) += k7.o p4.o p6.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o -obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c deleted file mode 100644 index b945d5d..0000000 --- a/arch/x86/kernel/cpu/mcheck/k7.c +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Athlon specific Machine Check Exception Reporting - * (C) Copyright 2002 Dave Jones - */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* Machine Check Handler For AMD Athlon/Duron: */ -static void k7_machine_check(struct pt_regs *regs, long error_code) -{ - u32 alow, ahigh, high, low; - u32 mcgstl, mcgsth; - int recover = 1; - int i; - - rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); - if (mcgstl & (1<<0)) /* Recoverable ? 
*/ - recover = 0; - - printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", - smp_processor_id(), mcgsth, mcgstl); - - for (i = 1; i < nr_mce_banks; i++) { - rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); - if (high & (1<<31)) { - char misc[20]; - char addr[24]; - - misc[0] = '\0'; - addr[0] = '\0'; - - if (high & (1<<29)) - recover |= 1; - if (high & (1<<25)) - recover |= 2; - high &= ~(1<<31); - - if (high & (1<<27)) { - rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); - snprintf(misc, 20, "[%08x%08x]", ahigh, alow); - } - if (high & (1<<26)) { - rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); - snprintf(addr, 24, " at %08x%08x", ahigh, alow); - } - - printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", - smp_processor_id(), i, high, low, misc, addr); - - /* Clear it: */ - wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); - /* Serialize: */ - wmb(); - add_taint(TAINT_MACHINE_CHECK); - } - } - - if (recover & 2) - panic("CPU context corrupt"); - if (recover & 1) - panic("Unable to continue"); - - printk(KERN_EMERG "Attempting to continue.\n"); - - mcgstl &= ~(1<<2); - wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); -} - - -/* AMD K7 machine check is Intel like: */ -void amd_mcheck_init(struct cpuinfo_x86 *c) -{ - u32 l, h; - int i; - - if (!cpu_has(c, X86_FEATURE_MCE)) - return; - - machine_check_vector = k7_machine_check; - /* Make sure the vector pointer is visible before we enable MCEs: */ - wmb(); - - printk(KERN_INFO "Intel machine check architecture supported.\n"); - - rdmsr(MSR_IA32_MCG_CAP, l, h); - if (l & (1<<8)) /* Control register present ? */ - wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); - nr_mce_banks = l & 0xff; - - /* - * Clear status for MC index 0 separately, we don't touch CTL, - * as some K7 Athlons cause spurious MCEs when its enabled: - */ - if (boot_cpu_data.x86 == 6) { - wrmsr(MSR_IA32_MC0_STATUS, 0x0, 0x0); - i = 1; - } else - i = 0; - - for (; i < nr_mce_banks; i++) { - wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); - wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); - } - - set_in_cr4(X86_CR4_MCE); - printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", - smp_processor_id()); -} diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7da8fec..5ff6362 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -58,8 +58,6 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = int mce_disabled __read_mostly; -#ifdef CONFIG_X86_NEW_MCE - #define MISC_MCELOG_MINOR 227 #define SPINUNIT 100 /* 100ns */ @@ -1993,51 +1991,6 @@ static __init int mce_init_device(void) device_initcall(mce_init_device); -#else /* CONFIG_X86_OLD_MCE: */ - -int nr_mce_banks; -EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ - -/* This has to be run for each processor */ -void mcheck_init(struct cpuinfo_x86 *c) -{ - if (mce_disabled) - return; - - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - amd_mcheck_init(c); - break; - - case X86_VENDOR_INTEL: - if (c->x86 == 5) - intel_p5_mcheck_init(c); - if (c->x86 == 6) - intel_p6_mcheck_init(c); - if (c->x86 == 15) - intel_p4_mcheck_init(c); - break; - - case X86_VENDOR_CENTAUR: - if (c->x86 == 5) - winchip_mcheck_init(c); - break; - - default: - break; - } - printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks); -} - -static int __init mcheck_enable(char *str) -{ - mce_p5_enabled = 1; - return 1; -} -__setup("mce", mcheck_enable); - -#endif /* CONFIG_X86_OLD_MCE */ - /* * Old style boot options parsing. Only for compatibility. 
*/ diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c deleted file mode 100644 index f5f2d6f..0000000 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Non Fatal Machine Check Exception Reporting - * - * (C) Copyright 2002 Dave Jones. - * - * This file contains routines to check for non-fatal MCEs every 15s - * - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static int firstbank; - -#define MCE_RATE (15*HZ) /* timer rate is 15s */ - -static void mce_checkregs(void *info) -{ - u32 low, high; - int i; - - for (i = firstbank; i < nr_mce_banks; i++) { - rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); - - if (!(high & (1<<31))) - continue; - - printk(KERN_INFO "MCE: The hardware reports a non fatal, " - "correctable incident occurred on CPU %d.\n", - smp_processor_id()); - - printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low); - - /* - * Scrub the error so we don't pick it up in MCE_RATE - * seconds time: - */ - wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); - - /* Serialize: */ - wmb(); - add_taint(TAINT_MACHINE_CHECK); - } -} - -static void mce_work_fn(struct work_struct *work); -static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); - -static void mce_work_fn(struct work_struct *work) -{ - on_each_cpu(mce_checkregs, NULL, 1); - schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); -} - -static int __init init_nonfatal_mce_checker(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - - /* Check for MCE support */ - if (!cpu_has(c, X86_FEATURE_MCE)) - return -ENODEV; - - /* Check for PPro style MCA */ - if (!cpu_has(c, X86_FEATURE_MCA)) - return -ENODEV; - - /* Some Athlons misbehave when we frob bank 0 */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 6) - firstbank = 1; - else - firstbank = 0; - - /* - * Check for non-fatal errors every MCE_RATE s - */ - schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); - printk(KERN_INFO "Machine check exception polling timer started.\n"); - - return 0; -} -module_init(init_nonfatal_mce_checker); - -MODULE_LICENSE("GPL"); diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c deleted file mode 100644 index 4482aea..0000000 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * P4 specific Machine Check Exception Reporting - */ -#include -#include -#include -#include - -#include -#include -#include - -/* as supported by the P4/Xeon family */ -struct intel_mce_extended_msrs { - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; - u32 esi; - u32 edi; - u32 ebp; - u32 esp; - u32 eflags; - u32 eip; - /* u32 *reserved[]; */ -}; - -static int mce_num_extended_msrs; - -/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */ -static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r) -{ - u32 h; - - rdmsr(MSR_IA32_MCG_EAX, r->eax, h); - rdmsr(MSR_IA32_MCG_EBX, r->ebx, h); - rdmsr(MSR_IA32_MCG_ECX, r->ecx, h); - rdmsr(MSR_IA32_MCG_EDX, r->edx, h); - rdmsr(MSR_IA32_MCG_ESI, r->esi, h); - rdmsr(MSR_IA32_MCG_EDI, r->edi, h); - rdmsr(MSR_IA32_MCG_EBP, r->ebp, h); - rdmsr(MSR_IA32_MCG_ESP, r->esp, h); - rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h); - rdmsr(MSR_IA32_MCG_EIP, r->eip, h); -} - -static void intel_machine_check(struct pt_regs *regs, long error_code) -{ - u32 alow, ahigh, high, low; - u32 mcgstl, mcgsth; - int recover = 1; - int i; - - rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); - if (mcgstl & (1<<0)) /* 
Recoverable ? */ - recover = 0; - - printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", - smp_processor_id(), mcgsth, mcgstl); - - if (mce_num_extended_msrs > 0) { - struct intel_mce_extended_msrs dbg; - - intel_get_extended_msrs(&dbg); - - printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n" - "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n" - "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n", - smp_processor_id(), dbg.eip, dbg.eflags, - dbg.eax, dbg.ebx, dbg.ecx, dbg.edx, - dbg.esi, dbg.edi, dbg.ebp, dbg.esp); - } - - for (i = 0; i < nr_mce_banks; i++) { - rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); - if (high & (1<<31)) { - char misc[20]; - char addr[24]; - - misc[0] = addr[0] = '\0'; - if (high & (1<<29)) - recover |= 1; - if (high & (1<<25)) - recover |= 2; - high &= ~(1<<31); - if (high & (1<<27)) { - rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); - snprintf(misc, 20, "[%08x%08x]", ahigh, alow); - } - if (high & (1<<26)) { - rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); - snprintf(addr, 24, " at %08x%08x", ahigh, alow); - } - printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", - smp_processor_id(), i, high, low, misc, addr); - } - } - - if (recover & 2) - panic("CPU context corrupt"); - if (recover & 1) - panic("Unable to continue"); - - printk(KERN_EMERG "Attempting to continue.\n"); - - /* - * Do not clear the MSR_IA32_MCi_STATUS if the error is not - * recoverable/continuable.This will allow BIOS to look at the MSRs - * for errors if the OS could not log the error. - */ - for (i = 0; i < nr_mce_banks; i++) { - u32 msr; - msr = MSR_IA32_MC0_STATUS+i*4; - rdmsr(msr, low, high); - if (high&(1<<31)) { - /* Clear it */ - wrmsr(msr, 0UL, 0UL); - /* Serialize */ - wmb(); - add_taint(TAINT_MACHINE_CHECK); - } - } - mcgstl &= ~(1<<2); - wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); -} - -void intel_p4_mcheck_init(struct cpuinfo_x86 *c) -{ - u32 l, h; - int i; - - machine_check_vector = intel_machine_check; - wmb(); - - printk(KERN_INFO "Intel machine check architecture supported.\n"); - rdmsr(MSR_IA32_MCG_CAP, l, h); - if (l & (1<<8)) /* Control register present ? */ - wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); - nr_mce_banks = l & 0xff; - - for (i = 0; i < nr_mce_banks; i++) { - wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); - wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); - } - - set_in_cr4(X86_CR4_MCE); - printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", - smp_processor_id()); - - /* Check for P4/Xeon extended MCE MSRs */ - rdmsr(MSR_IA32_MCG_CAP, l, h); - if (l & (1<<9)) {/* MCG_EXT_P */ - mce_num_extended_msrs = (l >> 16) & 0xff; - printk(KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)" - " available\n", - smp_processor_id(), mce_num_extended_msrs); - -#ifdef CONFIG_X86_MCE_P4THERMAL - /* Check for P4/Xeon Thermal monitor */ - intel_init_thermal(c); -#endif - } -} diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c deleted file mode 100644 index 01e4f81..0000000 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * P6 specific Machine Check Exception Reporting - * (C) Copyright 2002 Alan Cox - */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* Machine Check Handler For PII/PIII */ -static void intel_machine_check(struct pt_regs *regs, long error_code) -{ - u32 alow, ahigh, high, low; - u32 mcgstl, mcgsth; - int recover = 1; - int i; - - rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); - if (mcgstl & (1<<0)) /* Recoverable ? 
*/ - recover = 0; - - printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", - smp_processor_id(), mcgsth, mcgstl); - - for (i = 0; i < nr_mce_banks; i++) { - rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); - if (high & (1<<31)) { - char misc[20]; - char addr[24]; - - misc[0] = '\0'; - addr[0] = '\0'; - - if (high & (1<<29)) - recover |= 1; - if (high & (1<<25)) - recover |= 2; - high &= ~(1<<31); - - if (high & (1<<27)) { - rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); - snprintf(misc, 20, "[%08x%08x]", ahigh, alow); - } - if (high & (1<<26)) { - rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); - snprintf(addr, 24, " at %08x%08x", ahigh, alow); - } - - printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", - smp_processor_id(), i, high, low, misc, addr); - } - } - - if (recover & 2) - panic("CPU context corrupt"); - if (recover & 1) - panic("Unable to continue"); - - printk(KERN_EMERG "Attempting to continue.\n"); - /* - * Do not clear the MSR_IA32_MCi_STATUS if the error is not - * recoverable/continuable.This will allow BIOS to look at the MSRs - * for errors if the OS could not log the error: - */ - for (i = 0; i < nr_mce_banks; i++) { - unsigned int msr; - - msr = MSR_IA32_MC0_STATUS+i*4; - rdmsr(msr, low, high); - if (high & (1<<31)) { - /* Clear it: */ - wrmsr(msr, 0UL, 0UL); - /* Serialize: */ - wmb(); - add_taint(TAINT_MACHINE_CHECK); - } - } - mcgstl &= ~(1<<2); - wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); -} - -/* Set up machine check reporting for processors with Intel style MCE: */ -void intel_p6_mcheck_init(struct cpuinfo_x86 *c) -{ - u32 l, h; - int i; - - /* Check for MCE support */ - if (!cpu_has(c, X86_FEATURE_MCE)) - return; - - /* Check for PPro style MCA */ - if (!cpu_has(c, X86_FEATURE_MCA)) - return; - - /* Ok machine check is available */ - machine_check_vector = intel_machine_check; - /* Make sure the vector pointer is visible before we enable MCEs: */ - wmb(); - - printk(KERN_INFO "Intel machine check architecture supported.\n"); - rdmsr(MSR_IA32_MCG_CAP, l, h); - if (l & (1<<8)) /* Control register present ? */ - wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); - nr_mce_banks = l & 0xff; - - /* - * Following the example in IA-32 SDM Vol 3: - * - MC0_CTL should not be written - * - Status registers on all banks should be cleared on reset - */ - for (i = 1; i < nr_mce_banks; i++) - wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); - - for (i = 0; i < nr_mce_banks; i++) - wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); - - set_in_cr4(X86_CR4_MCE); - printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", - smp_processor_id()); -} -- cgit v1.1 From c1ebf835617035b1f08f734247dcb981e17aac6b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:41 +0200 Subject: x86: mce: Rename CONFIG_X86_NEW_MCE to CONFIG_X86_MCE Drop the CONFIG_X86_NEW_MCE symbol and change all references to it to check for CONFIG_X86_MCE directly. No code changes Signed-off-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/Kconfig | 11 +++-------- arch/x86/include/asm/entry_arch.h | 2 +- arch/x86/kernel/apic/nmi.c | 2 +- arch/x86/kernel/cpu/mcheck/Makefile | 3 +-- arch/x86/kernel/irq.c | 4 ++-- arch/x86/kernel/irqinit.c | 2 +- arch/x86/kernel/signal.c | 2 +- 7 files changed, 10 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d986769..06880ca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -781,15 +781,10 @@ config X86_MCE The action the kernel takes depends on the severity of the problem, ranging from warning messages to halting the machine. -config X86_NEW_MCE - depends on X86_MCE - bool - default y - config X86_MCE_INTEL def_bool y prompt "Intel MCE features" - depends on X86_NEW_MCE && X86_LOCAL_APIC + depends on X86_MCE && X86_LOCAL_APIC ---help--- Additional support for intel specific MCE features such as the thermal monitor. @@ -797,7 +792,7 @@ config X86_MCE_INTEL config X86_MCE_AMD def_bool y prompt "AMD MCE features" - depends on X86_NEW_MCE && X86_LOCAL_APIC + depends on X86_MCE && X86_LOCAL_APIC ---help--- Additional support for AMD specific MCE features such as the DRAM Error Threshold. @@ -817,7 +812,7 @@ config X86_MCE_THRESHOLD default y config X86_MCE_INJECT - depends on X86_NEW_MCE + depends on X86_MCE tristate "Machine check injector support" ---help--- Provide support for injecting machine checks for testing purposes. diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index ff8cbfa..5e3f204 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h @@ -61,7 +61,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) #endif -#ifdef CONFIG_X86_NEW_MCE +#ifdef CONFIG_X86_MCE BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR) #endif diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b4..f422728 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c @@ -66,7 +66,7 @@ static inline unsigned int get_nmi_count(int cpu) static inline int mce_in_progress(void) { -#if defined(CONFIG_X86_NEW_MCE) +#if defined(CONFIG_X86_MCE) return atomic_read(&mce_entry) > 0; #endif return 0; diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index 022a036..4ac6d48 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -1,6 +1,5 @@ -obj-y = mce.o +obj-y = mce.o mce-severity.o -obj-$(CONFIG_X86_NEW_MCE) += mce-severity.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index b0cdde6..74656d1 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -104,7 +104,7 @@ static int show_other_interrupts(struct seq_file *p, int prec) seq_printf(p, " Threshold APIC interrupts\n"); # endif #endif -#ifdef CONFIG_X86_NEW_MCE +#ifdef CONFIG_X86_MCE seq_printf(p, "%*s: ", prec, "MCE"); for_each_online_cpu(j) seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); @@ -200,7 +200,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->irq_threshold_count; # endif #endif -#ifdef CONFIG_X86_NEW_MCE +#ifdef CONFIG_X86_MCE sum += per_cpu(mce_exception_count, cpu); sum += per_cpu(mce_poll_count, cpu); #endif diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 696f0e4..8a194ad 100644 --- a/arch/x86/kernel/irqinit.c +++ 
b/arch/x86/kernel/irqinit.c @@ -190,7 +190,7 @@ static void __init apic_intr_init(void) #ifdef CONFIG_X86_THRESHOLD alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); #endif -#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) +#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC) alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt); #endif diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4c57875..cc26ad4c 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -856,7 +856,7 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { -#ifdef CONFIG_X86_NEW_MCE +#ifdef CONFIG_X86_MCE /* notify userspace of pending MCEs */ if (thread_info_flags & _TIF_MCE_NOTIFY) mce_notify_process(); -- cgit v1.1 From 9eda8cb3ac235217e4ffa01cb9cedee1c1550599 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:42 +0200 Subject: x86: mce: Move code in mce.c Now that the X86_OLD_MCE ifdefs are gone move some code that used to be outside the big ifdef to a more natural place near its user. No code change. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5ff6362..e16271f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -45,17 +45,6 @@ #include "mce-internal.h" -/* Handle unconfigured int18 (should never happen) */ -static void unexpected_machine_check(struct pt_regs *regs, long error_code) -{ - printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", - smp_processor_id()); -} - -/* Call the installed machine check handler for this CPU setup. */ -void (*machine_check_vector)(struct pt_regs *, long error_code) = - unexpected_machine_check; - int mce_disabled __read_mostly; #define MISC_MCELOG_MINOR 227 @@ -1322,6 +1311,17 @@ static void mce_init_timer(void) add_timer(t); } +/* Handle unconfigured int18 (should never happen) */ +static void unexpected_machine_check(struct pt_regs *regs, long error_code) +{ + printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", + smp_processor_id()); +} + +/* Call the installed machine check handler for this CPU setup. */ +void (*machine_check_vector)(struct pt_regs *, long error_code) = + unexpected_machine_check; + /* * Called for each booted CPU to set up machine checks. * Must be called with preempt off: -- cgit v1.1 From cebe182033f156b430952370fb0f9dbe6e89b081 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:43 +0200 Subject: x86: mce: Move per bank data in a single datastructure This addresses one of the leftover review comments. Move the per bank data into a single structure. This avoids several separate variables and also separate allocation of sysfs objects. I didn't move the CMCI ownership information so far because that would have needed some non trivial changes in the algorithms. Signed-off-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce-internal.h | 14 ++++ arch/x86/kernel/cpu/mcheck/mce.c | 109 +++++++++++++++--------------- 2 files changed, 67 insertions(+), 56 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 54dcb8f..6bd51e7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -1,3 +1,4 @@ +#include #include enum severity_level { @@ -10,6 +11,19 @@ enum severity_level { MCE_PANIC_SEVERITY, }; +#define ATTR_LEN 16 + +/* One object for each MCE bank, shared by all CPUs */ +struct mce_bank { + u64 ctl; /* subevents to enable */ + unsigned char init; /* initialise bank? */ + struct sysdev_attribute attr; /* sysdev attribute */ + char attrname[ATTR_LEN]; /* attribute name */ +}; + int mce_severity(struct mce *a, int tolerant, char **msg); extern int mce_ser; + +extern struct mce_bank *mce_banks; + diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e16271f..a04806e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -64,7 +64,6 @@ DEFINE_PER_CPU(unsigned, mce_exception_count); */ static int tolerant __read_mostly = 1; static int banks __read_mostly; -static u64 *bank __read_mostly; static int rip_msr __read_mostly; static int mce_bootlog __read_mostly = -1; static int monarch_timeout __read_mostly = -1; @@ -74,13 +73,13 @@ int mce_cmci_disabled __read_mostly; int mce_ignore_ce __read_mostly; int mce_ser __read_mostly; +struct mce_bank *mce_banks __read_mostly; + /* User mode helper program triggered by machine check event */ static unsigned long mce_need_notify; static char mce_helper[128]; static char *mce_helper_argv[2] = { mce_helper, NULL }; -static unsigned long dont_init_banks; - static DECLARE_WAIT_QUEUE_HEAD(mce_wait); static DEFINE_PER_CPU(struct mce, mces_seen); static int cpu_missing; @@ -91,11 +90,6 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL }; -static inline int skip_bank_init(int i) -{ - return i < BITS_PER_LONG && test_bit(i, &dont_init_banks); -} - static DEFINE_PER_CPU(struct work_struct, mce_work); /* Do initial initialization of a struct mce */ @@ -482,7 +476,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); for (i = 0; i < banks; i++) { - if (!bank[i] || !test_bit(i, *b)) + if (!mce_banks[i].ctl || !test_bit(i, *b)) continue; m.misc = 0; @@ -903,7 +897,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) order = mce_start(&no_way_out); for (i = 0; i < banks; i++) { __clear_bit(i, toclear); - if (!bank[i]) + if (!mce_banks[i].ctl) continue; m.misc = 0; @@ -1146,6 +1140,21 @@ int mce_notify_irq(void) } EXPORT_SYMBOL_GPL(mce_notify_irq); +static int mce_banks_init(void) +{ + int i; + + mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL); + if (!mce_banks) + return -ENOMEM; + for (i = 0; i < banks; i++) { + struct mce_bank *b = &mce_banks[i]; + b->ctl = -1ULL; + b->init = 1; + } + return 0; +} + /* * Initialize Machine Checks for a CPU. 
*/ @@ -1169,11 +1178,10 @@ static int mce_cap_init(void) /* Don't support asymmetric configurations today */ WARN_ON(banks != 0 && b != banks); banks = b; - if (!bank) { - bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); - if (!bank) - return -ENOMEM; - memset(bank, 0xff, banks * sizeof(u64)); + if (!mce_banks) { + int err = mce_banks_init(); + if (err) + return err; } /* Use accurate RIP reporting if available. */ @@ -1205,9 +1213,10 @@ static void mce_init(void) wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); for (i = 0; i < banks; i++) { - if (skip_bank_init(i)) + struct mce_bank *b = &mce_banks[i]; + if (!b->init) continue; - wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); + wrmsrl(MSR_IA32_MC0_CTL+4*i, b->ctl); wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); } } @@ -1223,7 +1232,7 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c) * trips off incorrectly with the IOMMU & 3ware * & Cerberus: */ - clear_bit(10, (unsigned long *)&bank[4]); + clear_bit(10, (unsigned long *)&mce_banks[4].ctl); } if (c->x86 <= 17 && mce_bootlog < 0) { /* @@ -1237,7 +1246,7 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c) * by default. */ if (c->x86 == 6 && banks > 0) - bank[0] = 0; + mce_banks[0].ctl = 0; } if (c->x86_vendor == X86_VENDOR_INTEL) { @@ -1250,8 +1259,8 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c) * valid event later, merely don't write CTL0. */ - if (c->x86 == 6 && c->x86_model < 0x1A) - __set_bit(0, &dont_init_banks); + if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0) + mce_banks[0].init = 0; /* * All newer Intel systems support MCE broadcasting. Enable @@ -1578,7 +1587,8 @@ static int mce_disable(void) int i; for (i = 0; i < banks; i++) { - if (!skip_bank_init(i)) + struct mce_bank *b = &mce_banks[i]; + if (b->init) wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); } return 0; @@ -1654,14 +1664,15 @@ DEFINE_PER_CPU(struct sys_device, mce_dev); __cpuinitdata void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); -static struct sysdev_attribute *bank_attrs; +static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr) +{ + return container_of(attr, struct mce_bank, attr); +} static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, char *buf) { - u64 b = bank[attr - bank_attrs]; - - return sprintf(buf, "%llx\n", b); + return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); } static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, @@ -1672,7 +1683,7 @@ static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, if (strict_strtoull(buf, 0, &new) < 0) return -EINVAL; - bank[attr - bank_attrs] = new; + attr_to_bank(attr)->ctl = new; mce_restart(); return size; @@ -1816,7 +1827,7 @@ static __cpuinit int mce_create_device(unsigned int cpu) } for (j = 0; j < banks; j++) { err = sysdev_create_file(&per_cpu(mce_dev, cpu), - &bank_attrs[j]); + &mce_banks[j].attr); if (err) goto error2; } @@ -1825,10 +1836,10 @@ static __cpuinit int mce_create_device(unsigned int cpu) return 0; error2: while (--j >= 0) - sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[j]); + sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); error: while (--i >= 0) - sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); + sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr); sysdev_unregister(&per_cpu(mce_dev, cpu)); @@ -1846,7 +1857,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); for (i = 0; i < banks; i++) - sysdev_remove_file(&per_cpu(mce_dev, cpu), 
&bank_attrs[i]); + sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr); sysdev_unregister(&per_cpu(mce_dev, cpu)); cpumask_clear_cpu(cpu, mce_dev_initialized); @@ -1863,7 +1874,8 @@ static void mce_disable_cpu(void *h) if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); for (i = 0; i < banks; i++) { - if (!skip_bank_init(i)) + struct mce_bank *b = &mce_banks[i]; + if (b->init) wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); } } @@ -1879,8 +1891,9 @@ static void mce_reenable_cpu(void *h) if (!(action & CPU_TASKS_FROZEN)) cmci_reenable(); for (i = 0; i < banks; i++) { - if (!skip_bank_init(i)) - wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); + struct mce_bank *b = &mce_banks[i]; + if (b->init) + wrmsrl(MSR_IA32_MC0_CTL + i*4, b->ctl); } } @@ -1928,35 +1941,21 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = { .notifier_call = mce_cpu_callback, }; -static __init int mce_init_banks(void) +static __init void mce_init_banks(void) { int i; - bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, - GFP_KERNEL); - if (!bank_attrs) - return -ENOMEM; - for (i = 0; i < banks; i++) { - struct sysdev_attribute *a = &bank_attrs[i]; + struct mce_bank *b = &mce_banks[i]; + struct sysdev_attribute *a = &b->attr; - a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); - if (!a->attr.name) - goto nomem; + a->attr.name = b->attrname; + snprintf(b->attrname, ATTR_LEN, "bank%d", i); a->attr.mode = 0644; a->show = show_bank; a->store = set_bank; } - return 0; - -nomem: - while (--i >= 0) - kfree(bank_attrs[i].attr.name); - kfree(bank_attrs); - bank_attrs = NULL; - - return -ENOMEM; } static __init int mce_init_device(void) @@ -1969,9 +1968,7 @@ static __init int mce_init_device(void) zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); - err = mce_init_banks(); - if (err) - return err; + mce_init_banks(); err = sysdev_class_register(&mce_sysclass); if (err) -- cgit v1.1 From a2d32bcbc008aa0f9c301a7c6f3494cb23e6af54 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:44 +0200 Subject: x86: mce: macros to compute banks MSRs Instead of open coded calculations for bank MSRs hide the indexing of higher banks MCE register MSRs in new macros. No semantic changes. Signed-off-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/msr-index.h | 7 +++++++ arch/x86/kernel/cpu/mcheck/mce.c | 34 +++++++++++++++++----------------- arch/x86/kernel/cpu/mcheck/mce_intel.c | 10 +++++----- 3 files changed, 29 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 1692fb5..3d1ce09 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -81,8 +81,15 @@ #define MSR_IA32_MC0_ADDR 0x00000402 #define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) +#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) +#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) +#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) + /* These are consecutive and not in the normal 4er MCE bank block */ #define MSR_IA32_MC0_CTL2 0x00000280 +#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) + #define CMCI_EN (1ULL << 30) #define CMCI_THRESHOLD_MASK 0xffffULL diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index a04806e..07139a0 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -267,11 +267,11 @@ static int msr_to_offset(u32 msr) unsigned bank = __get_cpu_var(injectm.bank); if (msr == rip_msr) return offsetof(struct mce, ip); - if (msr == MSR_IA32_MC0_STATUS + bank*4) + if (msr == MSR_IA32_MCx_STATUS(bank)) return offsetof(struct mce, status); - if (msr == MSR_IA32_MC0_ADDR + bank*4) + if (msr == MSR_IA32_MCx_ADDR(bank)) return offsetof(struct mce, addr); - if (msr == MSR_IA32_MC0_MISC + bank*4) + if (msr == MSR_IA32_MCx_MISC(bank)) return offsetof(struct mce, misc); if (msr == MSR_IA32_MCG_STATUS) return offsetof(struct mce, mcgstatus); @@ -485,7 +485,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) m.tsc = 0; barrier(); - m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); + m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if (!(m.status & MCI_STATUS_VAL)) continue; @@ -500,9 +500,9 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) continue; if (m.status & MCI_STATUS_MISCV) - m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); + m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i)); if (m.status & MCI_STATUS_ADDRV) - m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); + m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i)); if (!(flags & MCP_TIMESTAMP)) m.tsc = 0; @@ -518,7 +518,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) /* * Clear state for this bank. 
*/ - mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); + mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } /* @@ -539,7 +539,7 @@ static int mce_no_way_out(struct mce *m, char **msg) int i; for (i = 0; i < banks; i++) { - m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); + m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) return 1; } @@ -823,7 +823,7 @@ static void mce_clear_state(unsigned long *toclear) for (i = 0; i < banks; i++) { if (test_bit(i, toclear)) - mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); + mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } } @@ -904,7 +904,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) m.addr = 0; m.bank = i; - m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); + m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); if ((m.status & MCI_STATUS_VAL) == 0) continue; @@ -945,9 +945,9 @@ void do_machine_check(struct pt_regs *regs, long error_code) kill_it = 1; if (m.status & MCI_STATUS_MISCV) - m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); + m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i)); if (m.status & MCI_STATUS_ADDRV) - m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); + m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i)); /* * Action optional error. Queue address for later processing. @@ -1216,8 +1216,8 @@ static void mce_init(void) struct mce_bank *b = &mce_banks[i]; if (!b->init) continue; - wrmsrl(MSR_IA32_MC0_CTL+4*i, b->ctl); - wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); + wrmsrl(MSR_IA32_MCx_STATUS(i), 0); } } @@ -1589,7 +1589,7 @@ static int mce_disable(void) for (i = 0; i < banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); + wrmsrl(MSR_IA32_MCx_CTL(i), 0); } return 0; } @@ -1876,7 +1876,7 @@ static void mce_disable_cpu(void *h) for (i = 0; i < banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); + wrmsrl(MSR_IA32_MCx_CTL(i), 0); } } @@ -1893,7 +1893,7 @@ static void mce_reenable_cpu(void *h) for (i = 0; i < banks; i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(MSR_IA32_MC0_CTL + i*4, b->ctl); + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); } } diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index e1acec0f..889f665 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -90,7 +90,7 @@ static void cmci_discover(int banks, int boot) if (test_bit(i, owned)) continue; - rdmsrl(MSR_IA32_MC0_CTL2 + i, val); + rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Already owned by someone else? */ if (val & CMCI_EN) { @@ -101,8 +101,8 @@ static void cmci_discover(int banks, int boot) } val |= CMCI_EN | CMCI_THRESHOLD; - wrmsrl(MSR_IA32_MC0_CTL2 + i, val); - rdmsrl(MSR_IA32_MC0_CTL2 + i, val); + wrmsrl(MSR_IA32_MCx_CTL2(i), val); + rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Did the enable bit stick? 
-- the bank supports CMCI */ if (val & CMCI_EN) { @@ -152,9 +152,9 @@ void cmci_clear(void) if (!test_bit(i, __get_cpu_var(mce_banks_owned))) continue; /* Disable CMCI */ - rdmsrl(MSR_IA32_MC0_CTL2 + i, val); + rdmsrl(MSR_IA32_MCx_CTL2(i), val); val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK); - wrmsrl(MSR_IA32_MC0_CTL2 + i, val); + wrmsrl(MSR_IA32_MCx_CTL2(i), val); __clear_bit(i, __get_cpu_var(mce_banks_owned)); } spin_unlock_irqrestore(&cmci_discover_lock, flags); -- cgit v1.1 From 3ccdccfadbd2548abe38682b587f4ba27eac2fc9 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 9 Jul 2009 00:31:45 +0200 Subject: x86: mce: Lower maximum number of banks to architecture limit The Intel x86 architecture right now only supports 32 machine check banks, more would bump into other MSRs. So lower the max define to 32. This only affects a few bitmaps, most data structures are dynamically sized anyways. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/mce.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 6b8a974..ad75353 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -130,10 +130,11 @@ void mce_log(struct mce *m); DECLARE_PER_CPU(struct sys_device, mce_dev); /* - * To support more than 128 would need to escape the predefined - * Linux defined extended banks first. + * Maximum banks number. + * This is the limit of the current register layout on + * Intel CPUs. */ -#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1) +#define MAX_NR_BANKS 32 #ifdef CONFIG_X86_MCE_INTEL extern int mce_cmci_disabled; -- cgit v1.1 From 2802e34590f290173a3f2aa5a4d662ae5373b420 Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Thu, 9 Jul 2009 14:45:59 +0000 Subject: sh: Clean up linker script using new linker script macros. This patch converts the sh architecture to use the new linker script macros in include/asm-generic/vmlinux.lds.h. Signed-off-by: Tim Abbott Cc: Paul Mundt Cc: Sam Ravnborg Cc: linux-sh@vger.kernel.org Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux.lds.S | 87 ++++++-------------------------------------- 1 file changed, 11 insertions(+), 76 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index f53c76a..674ed8f 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -50,12 +50,7 @@ SECTIONS _etext = .; /* End of text section */ } = 0x0009 - . = ALIGN(16); /* Exception table */ - __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } + EXCEPTION_TABLE(16) NOTES RO_DATA(PAGE_SIZE) @@ -71,69 +66,14 @@ SECTIONS __uncached_end = .; } - . = ALIGN(THREAD_SIZE); - .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ - *(.data.init_task) - - . = ALIGN(L1_CACHE_BYTES); - *(.data.cacheline_aligned) - - . = ALIGN(L1_CACHE_BYTES); - *(.data.read_mostly) - - . = ALIGN(PAGE_SIZE); - *(.data.page_aligned) - - __nosave_begin = .; - *(.data.nosave) - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - - DATA_DATA - CONSTRUCTORS - } + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; /* End of data section */ . = ALIGN(PAGE_SIZE); /* Init code and data */ - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - __init_begin = .; - _sinittext = .; - INIT_TEXT - _einittext = .; - } - - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA } - - . 
= ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - - SECURITY_INIT - -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(PAGE_SIZE); - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif + __init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) . = ALIGN(4); .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) { @@ -152,16 +92,11 @@ SECTIONS .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } . = ALIGN(PAGE_SIZE); - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { - __init_end = .; - __bss_start = .; /* BSS */ - *(.bss.page_aligned) - *(.bss) - *(COMMON) - . = ALIGN(4); - _ebss = .; /* uClinux MTD sucks */ - _end = . ; - } + __init_end = .; + BSS(PAGE_SIZE) + . = ALIGN(4); + _ebss = .; /* uClinux MTD sucks */ + _end = . ; /* * When something in the kernel is NOT compiled as a module, the @@ -170,7 +105,7 @@ SECTIONS * it's a module. */ /DISCARD/ : { - *(.exitcall.exit) + EXIT_CALL } STABS_DEBUG -- cgit v1.1 From b99610fb9cdf390965c62c22322596d961591160 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 01:00:23 +0000 Subject: sh: Provide diagnostic kernel stack checks Enable kernel stack checking code in both the dynamic ftrace and mcount code paths. Check the stack to see if it's overflowing and make sure that the stack pointer contains an address that's either in init_stack or after the bss. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 11 ++++++ arch/sh/kernel/asm-offsets.c | 1 + arch/sh/lib/mcount.S | 85 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) (limited to 'arch') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 39224b5..52a132c 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -123,4 +123,15 @@ config SH64_SR_WATCH bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" depends on SUPERH64 +config STACK_DEBUG + bool "Enable diagnostic checks of the kernel stack" + depends on FUNCTION_TRACER + select DEBUG_STACKOVERFLOW + default n + help + This option allows checks to be performed on the kernel stack + at runtime. Saying Y here will add overhead to every function + call and will therefore incur a major performance hit. Most + users should say N. + endmenu diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index 99aceb28..d218e80 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c @@ -26,6 +26,7 @@ int main(void) DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); + DEFINE(TI_SIZE, sizeof(struct thread_info)); #ifdef CONFIG_HIBERNATION DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 71e87f9..8596483 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -9,6 +9,8 @@ * for more details. 
*/ #include +#include +#include #define MCOUNT_ENTER() \ mov.l r4, @-r15; \ @@ -28,6 +30,55 @@ rts; \ mov.l @r15+, r4 +#ifdef CONFIG_STACK_DEBUG +/* + * Perform diagnostic checks on the state of the kernel stack. + * + * Check for stack overflow. If there is less than 1KB free + * then it has overflowed. + * + * Make sure the stack pointer contains a valid address. Valid + * addresses for kernel stacks are anywhere after the bss + * (after _ebss) and anywhere in init_thread_union (init_stack). + */ +#define STACK_CHECK() \ + mov #(THREAD_SIZE >> 10), r0; \ + shll8 r0; \ + shll2 r0; \ + \ + /* r1 = sp & (THREAD_SIZE - 1) */ \ + mov #-1, r1; \ + add r0, r1; \ + and r15, r1; \ + \ + mov #TI_SIZE, r3; \ + mov #(STACK_WARN >> 8), r2; \ + shll8 r2; \ + add r3, r2; \ + \ + /* Is the stack overflowing? */ \ + cmp/hi r2, r1; \ + bf stack_panic; \ + \ + /* If sp > _ebss then we're OK. */ \ + mov.l .L_ebss, r1; \ + cmp/hi r1, r15; \ + bt 1f; \ + \ + /* If sp < init_stack, we're not OK. */ \ + mov.l .L_init_thread_union, r1; \ + cmp/hs r1, r15; \ + bf stack_panic; \ + \ + /* If sp > init_stack && sp < _ebss, not OK. */ \ + add r0, r1; \ + cmp/hs r1, r15; \ + bt stack_panic; \ +1: +#else +#define STACK_CHECK() +#endif /* CONFIG_STACK_DEBUG */ + .align 2 .globl _mcount .type _mcount,@function @@ -41,6 +92,8 @@ mcount: tst r0, r0 bf ftrace_stub #endif + STACK_CHECK() + MCOUNT_ENTER() #ifdef CONFIG_DYNAMIC_FTRACE @@ -73,6 +126,8 @@ ftrace_caller: tst r0, r0 bf ftrace_stub + STACK_CHECK() + MCOUNT_ENTER() .globl ftrace_call @@ -100,6 +155,36 @@ ftrace_stub: rts nop +#ifdef CONFIG_STACK_DEBUG + .globl stack_panic +stack_panic: + mov.l .Ldump_stack, r0 + jsr @r0 + nop + + mov.l .Lpanic, r0 + jsr @r0 + mov.l .Lpanic_s, r4 + + rts + nop + .align 2 .Lfunction_trace_stop: .long function_trace_stop +.L_ebss: + .long _ebss +.L_init_thread_union: + .long init_thread_union +.Lpanic: + .long panic +.Lpanic_s: + .long .Lpanic_str +.Ldump_stack: + .long dump_stack + + .section .rodata + .align 2 +.Lpanic_str: + .string "Stack error" +#endif /* CONFIG_STACK_DEBUG */ -- cgit v1.1 From 327933f5d6cdf083284d3c06e0370d1de464aef4 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 00:29:03 +0000 Subject: sh: Function graph tracer support Add both dynamic and static function graph tracer support for sh. 
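The tracer hooks each traced function's return address, as the prepare_ftrace_return() comment below spells out: the real return address is saved on the task's ret_stack and replaced with return_to_handler, which later pops it back off to find where execution should really resume. A minimal, architecture-independent sketch of that shadow-stack bookkeeping, in plain userspace C with purely illustrative names (a simplified model, not the kernel's ret_stack code):

/*
 * Simplified model of the return-address shadow stack used by the
 * function graph tracer.  Illustrative only.
 */
#include <stdio.h>

#define RET_STACK_DEPTH 64

static unsigned long ret_stack[RET_STACK_DEPTH];
static int curr_ret_depth = -1;

/* On function entry: remember the real return address. */
static int push_return_trace(unsigned long real_ret)
{
	if (curr_ret_depth + 1 >= RET_STACK_DEPTH)
		return -1;	/* stack full: leave the call unhooked */
	ret_stack[++curr_ret_depth] = real_ret;
	return 0;
}

/* From the substituted handler: recover where to really return. */
static unsigned long pop_return_trace(void)
{
	return ret_stack[curr_ret_depth--];
}

int main(void)
{
	unsigned long fake_ret = 0x8c001234UL;	/* pretend caller address */

	if (push_return_trace(fake_ret) == 0)
		printf("will really return to %#lx\n", pop_return_trace());
	return 0;
}

This push/pop pairing is also why a later patch in this series keeps __switch_to() out of the graph tracer: the push lands on one task's shadow stack while the pop would otherwise be attempted on another's.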
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/ftrace.h | 3 + arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/ftrace.c | 122 ++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/vmlinux_64.lds.S | 0 arch/sh/lib/Makefile | 1 + arch/sh/lib/mcount.S | 117 +++++++++++++++++++++++++++++++++++++- 7 files changed, 244 insertions(+), 1 deletion(-) create mode 100644 arch/sh/kernel/vmlinux_64.lds.S (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 29e41ec..6d110a4 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -33,6 +33,7 @@ config SUPERH32 select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_SYSCALLS + select HAVE_FUNCTION_GRAPH_TRACER select HAVE_ARCH_KGDB select ARCH_HIBERNATION_POSSIBLE if MMU diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index b09311a..7e0bcc4 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -13,8 +13,11 @@ extern void mcount(void); #ifdef CONFIG_DYNAMIC_FTRACE #define CALL_ADDR ((long)(ftrace_call)) #define STUB_ADDR ((long)(ftrace_stub)) +#define GRAPH_ADDR ((long)(ftrace_graph_call)) +#define CALLER_ADDR ((long)(ftrace_caller)) #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4) +#define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4) struct dyn_arch_ftrace { /* No extra data needed on sh */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index fee924a..94ed99b 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -30,6 +30,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 4f62ece..6647dfc 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -16,11 +16,13 @@ #include #include #include +#include #include #include #include #include +#ifdef CONFIG_DYNAMIC_FTRACE static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; static unsigned char ftrace_nop[4]; @@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); + +static int ftrace_mod(unsigned long ip, unsigned long old_addr, + unsigned long new_addr) +{ + unsigned char code[MCOUNT_INSN_SIZE]; + + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + if (old_addr != __raw_readl((unsigned long *)code)) + return -EINVAL; + + __raw_writel(new_addr, ip); + return 0; +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned long ip, old_addr, new_addr; + + ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; + old_addr = (unsigned long)(&skip_trace); + new_addr = (unsigned long)(&ftrace_graph_caller); + + return ftrace_mod(ip, old_addr, new_addr); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned long ip, old_addr, new_addr; + + ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; + old_addr = (unsigned long)(&ftrace_graph_caller); + new_addr = (unsigned long)(&skip_trace); + + return ftrace_mod(ip, old_addr, new_addr); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addrs + * in the current thread info. 
+ * + * This is the main routine for the function graph tracer. The function + * graph tracer essentially works like this: + * + * parent is the stack address containing self_addr's return address. + * We pull the real return address out of parent and store it in + * current's ret_stack. Then, we replace the return address on the stack + * with the address of return_to_handler. self_addr is the function that + * called mcount. + * + * When self_addr returns, it will jump to return_to_handler which calls + * ftrace_return_to_handler. ftrace_return_to_handler will pull the real + * return address off of current's ret_stack and jump to it. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + unsigned long old; + int faulted, err; + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Protect against fault, even if it shouldn't + * happen. This tool is too much intrusive to + * ignore such a protection. + */ + __asm__ __volatile__( + "1: \n\t" + "mov.l @%2, %0 \n\t" + "2: \n\t" + "mov.l %3, @%2 \n\t" + "mov #0, %1 \n\t" + "3: \n\t" + ".section .fixup, \"ax\" \n\t" + "4: \n\t" + "mov.l 5f, %0 \n\t" + "jmp @%0 \n\t" + " mov #1, %1 \n\t" + ".balign 4 \n\t" + "5: .long 3b \n\t" + ".previous \n\t" + ".section __ex_table,\"a\" \n\t" + ".long 1b, 4b \n\t" + ".long 2b, 4b \n\t" + ".previous \n\t" + : "=&r" (old), "=r" (faulted) + : "r" (parent), "r" (return_hooker) + ); + + if (unlikely(faulted)) { + ftrace_graph_stop(); + WARN_ON(1); + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); + if (err == -EBUSY) { + __raw_writel(old, parent); + return; + } + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + __raw_writel(old, parent); + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_FTRACE_SYSCALLS diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S new file mode 100644 index 0000000..e69de29 diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index aaea580..19328d9 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -25,6 +25,7 @@ memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o lib-$(CONFIG_MMU) += copy_page.o clear_page.o lib-$(CONFIG_FUNCTION_TRACER) += mcount.o +lib-$(CONFIG_FUNCTION_GRAPH_TRACER) += mcount.o lib-y += $(memcpy-y) $(udivsi3-y) EXTRA_CFLAGS += -Werror diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 8596483..bd3ec64 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -111,14 +111,62 @@ mcount_call: jsr @r6 nop +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + mov.l .Lftrace_graph_return, r6 + mov.l .Lftrace_stub, r7 + cmp/eq r6, r7 + bt 1f + + mov.l .Lftrace_graph_caller, r0 + jmp @r0 + nop + +1: + mov.l .Lftrace_graph_entry, r6 + mov.l .Lftrace_graph_entry_stub, r7 + cmp/eq r6, r7 + bt skip_trace + + mov.l .Lftrace_graph_caller, r0 + jmp @r0 + nop + + .align 2 +.Lftrace_graph_return: + .long ftrace_graph_return +.Lftrace_graph_entry: + .long ftrace_graph_entry +.Lftrace_graph_entry_stub: + .long ftrace_graph_entry_stub +.Lftrace_graph_caller: + .long ftrace_graph_caller +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + + .globl skip_trace skip_trace: MCOUNT_LEAVE() .align 2 .Lftrace_trace_function: - .long ftrace_trace_function + .long ftrace_trace_function #ifdef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * NOTE: Do not move 
either ftrace_graph_call or ftrace_caller + * as this will affect the calculation of GRAPH_INSN_OFFSET. + */ + .globl ftrace_graph_call +ftrace_graph_call: + mov.l .Lskip_trace, r0 + jmp @r0 + nop + + .align 2 +.Lskip_trace: + .long skip_trace +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + .globl ftrace_caller ftrace_caller: mov.l .Lfunction_trace_stop, r0 @@ -136,7 +184,12 @@ ftrace_call: jsr @r6 nop +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bra ftrace_graph_call + nop +#else MCOUNT_LEAVE() +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */ /* @@ -188,3 +241,65 @@ stack_panic: .Lpanic_str: .string "Stack error" #endif /* CONFIG_STACK_DEBUG */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + mov.l 2f, r0 + mov.l @r0, r0 + tst r0, r0 + bt 1f + + mov.l 3f, r1 + jmp @r1 + nop +1: + /* + * MCOUNT_ENTER() pushed 5 registers onto the stack, so + * the stack address containing our return address is + * r15 + 20. + */ + mov #20, r0 + add r15, r0 + mov r0, r4 + + mov.l .Lprepare_ftrace_return, r0 + jsr @r0 + nop + + MCOUNT_LEAVE() + + .align 2 +2: .long function_trace_stop +3: .long skip_trace +.Lprepare_ftrace_return: + .long prepare_ftrace_return + + .globl return_to_handler +return_to_handler: + /* + * Save the return values. + */ + mov.l r0, @-r15 + mov.l r1, @-r15 + + mov #0, r4 + + mov.l .Lftrace_return_to_handler, r0 + jsr @r0 + nop + + /* + * The return value from ftrace_return_handler has the real + * address that we should return to. + */ + lds r0, pr + mov.l @r15+, r1 + rts + mov.l @r15+, r0 + + + .align 2 +.Lftrace_return_to_handler: + .long ftrace_return_to_handler +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- cgit v1.1 From 7816fecd03e480ed0b47d674ed772ca0b45e1b5e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 00:29:04 +0000 Subject: sh: Mark __switch_to() as __notrace_funcgraph Annotate __switch_to() so that the function graph tracer does not try to trace it. Use __notrace_funcgraph, as opposed to notrace, so that other tracers can continue to trace __switch_to(). The reason that we don't want to trace __switch_to() with the function graph tracer is because of how the return address stack in task_struct is implemented. When we enter __switch_to we store the real return address on prev's ret_stack. When we return from __switch_to() we've patched the return address on the kernel stack to be return_to_handler. Calling return_to_handler we do, -> ftrace_return_to_handler() -> ftrace_pop_return_ftrace() Which tries to pop the real return address from current->ret_stack. The problem being that we stored the return address on prev->ret_stack, but current now points to next, and next->ret_stack doesn't contain the correct return address (and is possibly even empty). Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/process_32.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 92d7740..9fee977 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -264,8 +265,8 @@ static void ubc_set_tracing(int asid, unsigned long pc) * switch_to(x,y) should switch tasks from x to y. 
* */ -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *next) +__notrace_funcgraph struct task_struct * +__switch_to(struct task_struct *prev, struct task_struct *next) { #if defined(CONFIG_SH_FPU) unlazy_fpu(prev, task_pt_regs(prev)); -- cgit v1.1 From 473d1cf4ee623b043790838bcf77e77958840bf2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 19:56:58 +0900 Subject: sh: Decouple mcount from ftrace. This adds a general CONFIG_MCOUNT in order to permit mcount generation without ftrace support. This is primarily for allowing platforms to enable aggressive stack overflow checking without having to enable ftrace support. Based on the sparc64 implementation. Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 5 +++++ arch/sh/Makefile | 4 ++++ arch/sh/boot/compressed/Makefile | 2 +- arch/sh/kernel/sh_ksyms_32.c | 2 +- arch/sh/lib/Makefile | 3 +-- 5 files changed, 12 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 52a132c..75b5f4e 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -134,4 +134,9 @@ config STACK_DEBUG call and will therefore incur a major performance hit. Most users should say N. +config MCOUNT + def_bool y + depends on SUPERH32 + depends on STACK_DEBUG || FUNCTION_TRACER + endmenu diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 75d049b0..52c34bf 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -186,6 +186,10 @@ KBUILD_CFLAGS += -pipe $(cflags-y) KBUILD_CPPFLAGS += $(cflags-y) KBUILD_AFLAGS += $(cflags-y) +ifeq ($(CONFIG_MCOUNT),y) + KBUILD_CFLAGS += -pg +endif + libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 9531bf1..3af239c 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -23,7 +23,7 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) -ifeq ($(CONFIG_FUNCTION_TRACER),y) +ifeq ($(CONFIG_MCOUNT),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index fcc5de3..5b81116 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -106,7 +106,7 @@ EXPORT_SYMBOL(flush_dcache_page); EXPORT_SYMBOL(clear_user_page); #endif -#ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_MCOUNT EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(csum_partial); diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 19328d9..c2b28d8 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -24,8 +24,7 @@ memcpy-y := memcpy.o memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o lib-$(CONFIG_MMU) += copy_page.o clear_page.o -lib-$(CONFIG_FUNCTION_TRACER) += mcount.o -lib-$(CONFIG_FUNCTION_GRAPH_TRACER) += mcount.o +lib-$(CONFIG_MCOUNT) += mcount.o lib-y += $(memcpy-y) $(udivsi3-y) EXTRA_CFLAGS += -Werror -- cgit v1.1 From 9f14b84afda297d301b81a5bcbd65e83d7b02034 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 20:05:34 +0900 Subject: sh: Replace DEBUG_STACKOVERFLOW with STACK_DEBUG. STACK_DEBUG ties in to mcount in order to do function-granular stack overflow checks as opposed to lazily checking from IRQ context. As the default is nohz, the frequency of overflow checking is too irregular to catch much useful information, and so the mcount approach employed by sparc64 is adopted instead. 
This kills off the old check entirely from the do_IRQ() path and now adopts CONFIG_MCOUNT instead. Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 17 ++++------------- arch/sh/kernel/irq.c | 17 ----------------- 2 files changed, 4 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 75b5f4e..b440fd9 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -61,12 +61,14 @@ config EARLY_PRINTK select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using the kernel command line option to toggle back and forth. -config DEBUG_STACKOVERFLOW +config STACK_DEBUG bool "Check for stack overflows" depends on DEBUG_KERNEL && SUPERH32 help This option will cause messages to be printed if free stack space - drops below a certain limit. + drops below a certain limit. Saying Y here will add overhead to + every function call and will therefore incur a major + performance hit. Most users should say N. config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" @@ -123,17 +125,6 @@ config SH64_SR_WATCH bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" depends on SUPERH64 -config STACK_DEBUG - bool "Enable diagnostic checks of the kernel stack" - depends on FUNCTION_TRACER - select DEBUG_STACKOVERFLOW - default n - help - This option allows checks to be performed on the kernel stack - at runtime. Saying Y here will add overhead to every function - call and will therefore incur a major performance hit. Most - users should say N. - config MCOUNT def_bool y depends on SUPERH32 diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 3d09062..278c68c 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -114,23 +114,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) #endif irq_enter(); - -#ifdef CONFIG_DEBUG_STACKOVERFLOW - /* Debugging check for stack overflow: is there less than 1KB free? */ - { - long sp; - - __asm__ __volatile__ ("and r15, %0" : - "=r" (sp) : "0" (THREAD_SIZE - 1)); - - if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - printk("do_IRQ: stack overflow: %ld\n", - sp - sizeof(struct thread_info)); - dump_stack(); - } - } -#endif - irq = irq_demux(intc_evt2irq(irq)); #ifdef CONFIG_IRQSTACKS -- cgit v1.1 From fe27932052aebf77ac5f3e73962825d2aeb457a0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 20:32:14 +0900 Subject: sh: Use DECLARE_EXPORT() for mcount symbol export. The function prototype for mcount is not defined if we are not building with ftrace support enabled, so use DECLARE_EXPORT() to stub one in. Signed-off-by: Paul Mundt --- arch/sh/kernel/sh_ksyms_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 5b81116..cec6108 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -107,7 +107,7 @@ EXPORT_SYMBOL(clear_user_page); #endif #ifdef CONFIG_MCOUNT -EXPORT_SYMBOL(mcount); +DECLARE_EXPORT(mcount); #endif EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); -- cgit v1.1 From a470b95e99ea77ef1e307ff181e59a4a16caa4f4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 20:33:34 +0900 Subject: sh: Fix up ftrace build error when STACK_DEBUG=n. Presently the closest reference to function_trace_stop is within a CONFIG_STACK_DEBUG block. When this is turned off, the build bails out with a pcrel too far error. Reorder things a bit to handle the various combinations. 
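The check being shuffled around here is the one added earlier in the series: STACK_CHECK() runs at every function entry, masks the stack pointer down to its offset within the THREAD_SIZE-aligned stack block, and panics if less than about 1KB remains above the thread_info that sits at the bottom (it additionally verifies that the pointer lies after _ebss or inside init_thread_union). The arithmetic is the same as the old do_IRQ() test removed above; a small userspace sketch with placeholder constants, not the kernel's real values:

/*
 * Userspace model of the stack overflow test.  THREAD_SIZE, STACK_WARN
 * and the thread_info size below are stand-ins, not the sh values.
 */
#include <stdio.h>

#define THREAD_SIZE		8192UL
#define STACK_WARN		(THREAD_SIZE / 8)
#define THREAD_INFO_SIZE	64UL

static int stack_is_overflowing(unsigned long sp)
{
	/* Offset of sp within its THREAD_SIZE-aligned stack block. */
	unsigned long free = sp & (THREAD_SIZE - 1);

	/* thread_info lives at the bottom; warn before we run into it. */
	return free < THREAD_INFO_SIZE + STACK_WARN;
}

int main(void)
{
	unsigned long base = 0x8c402000UL;	/* THREAD_SIZE-aligned */

	printf("near top:    %d\n", stack_is_overflowing(base + THREAD_SIZE - 256));
	printf("near bottom: %d\n", stack_is_overflowing(base + THREAD_INFO_SIZE + 16));
	return 0;
}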
Signed-off-by: Paul Mundt --- arch/sh/lib/mcount.S | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index bd3ec64..9e397aa 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -192,6 +192,10 @@ ftrace_call: #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */ + .align 2 +.Lfunction_trace_stop: + .long function_trace_stop + /* * NOTE: From here on the locations of the .Lftrace_stub label and * ftrace_stub itself are fixed. Adding additional data here will skew @@ -199,7 +203,6 @@ ftrace_call: * Place new labels either after the ftrace_stub body, or before * ftrace_caller. You have been warned. */ - .align 2 .Lftrace_stub: .long ftrace_stub @@ -223,8 +226,6 @@ stack_panic: nop .align 2 -.Lfunction_trace_stop: - .long function_trace_stop .L_ebss: .long _ebss .L_init_thread_union: -- cgit v1.1 From e460ab27b6c3ea313762169713086529d5bfb8bc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 21:06:53 +0900 Subject: sh: Fix up stack overflow check with ftrace disabled. Presently the STACK_CHECK() code is called in to multiple times, although it's only necessary from the mcount entry. The code still attempts to treat the nop case as an ftrace path resulting in superfluous code flow for the case where ftrace is disabled. And finally, this also fixes up references to a few undefined symbols when FUNCTION_TRACER=n. Signed-off-by: Paul Mundt --- arch/sh/lib/mcount.S | 76 +++++++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 9e397aa..84a5776 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -1,7 +1,7 @@ /* * arch/sh/lib/mcount.S * - * Copyright (C) 2008 Paul Mundt + * Copyright (C) 2008, 2009 Paul Mundt * Copyright (C) 2008, 2009 Matt Fleming * * This file is subject to the terms and conditions of the GNU General Public @@ -86,13 +86,18 @@ .type mcount,@function _mcount: mcount: + STACK_CHECK() + +#ifndef CONFIG_FUNCTION_TRACER + rts + nop +#else #ifndef CONFIG_DYNAMIC_FTRACE mov.l .Lfunction_trace_stop, r0 mov.l @r0, r0 tst r0, r0 bf ftrace_stub #endif - STACK_CHECK() MCOUNT_ENTER() @@ -174,8 +179,6 @@ ftrace_caller: tst r0, r0 bf ftrace_stub - STACK_CHECK() - MCOUNT_ENTER() .globl ftrace_call @@ -211,38 +214,6 @@ ftrace_stub: rts nop -#ifdef CONFIG_STACK_DEBUG - .globl stack_panic -stack_panic: - mov.l .Ldump_stack, r0 - jsr @r0 - nop - - mov.l .Lpanic, r0 - jsr @r0 - mov.l .Lpanic_s, r4 - - rts - nop - - .align 2 -.L_ebss: - .long _ebss -.L_init_thread_union: - .long init_thread_union -.Lpanic: - .long panic -.Lpanic_s: - .long .Lpanic_str -.Ldump_stack: - .long dump_stack - - .section .rodata - .align 2 -.Lpanic_str: - .string "Stack error" -#endif /* CONFIG_STACK_DEBUG */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_caller ftrace_graph_caller: @@ -304,3 +275,36 @@ return_to_handler: .Lftrace_return_to_handler: .long ftrace_return_to_handler #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_STACK_DEBUG + .globl stack_panic +stack_panic: + mov.l .Ldump_stack, r0 + jsr @r0 + nop + + mov.l .Lpanic, r0 + jsr @r0 + mov.l .Lpanic_s, r4 + + rts + nop + + .align 2 +.L_ebss: + .long _ebss +.L_init_thread_union: + .long init_thread_union +.Lpanic: + .long panic +.Lpanic_s: + .long .Lpanic_str +.Ldump_stack: + .long dump_stack + + .section .rodata + .align 2 +.Lpanic_str: + 
.string "Stack error" +#endif /* CONFIG_STACK_DEBUG */ -- cgit v1.1 From df8ce2595fbac8b046322fce9df61ce1cf8ddf62 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 12 Jul 2009 01:37:30 +0900 Subject: sh: Tidy up gzip-based zImage decompression. This brings the zImage handling in to the current century, in preparation for handling the other compression types. Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/boot/compressed/misc_32.c | 106 ++++--------------------------- arch/sh/boot/compressed/misc_64.c | 127 +++----------------------------------- 3 files changed, 19 insertions(+), 215 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 120bd31..9f531ca 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -17,6 +17,7 @@ config SUPERH select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG select HAVE_PERF_COUNTERS + select HAVE_KERNEL_GZIP select RTC_LIB select GENERIC_ATOMIC64 help diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c index efdba6b..1ab4f49 100644 --- a/arch/sh/boot/compressed/misc_32.c +++ b/arch/sh/boot/compressed/misc_32.c @@ -14,73 +14,23 @@ #include #include #include -#ifdef CONFIG_SH_STANDARD_BIOS #include -#endif /* * gzip declarations */ -#define OF(args) args #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; - -#define WSIZE 0x8000 /* Window size must be at least 32k, */ - /* and a power of two */ - -static uch *inbuf; /* input buffer */ -static uch window[WSIZE]; /* Sliding window buffer */ - -static unsigned insize = 0; /* valid bytes in inbuf */ -static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ -static unsigned outcnt = 0; /* bytes in output buffer */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ - -#define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) - -/* Diagnostic functions */ -#ifdef DEBUG -# define Assert(cond,msg) {if(!(cond)) error(msg);} -# define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ;} -# define Tracevv(x) {if (verbose>1) fprintf x ;} -# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} -# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} -#else -# define Assert(cond,msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) -#endif - -static int fill_inbuf(void); -static void flush_window(void); static void error(char *m); extern char input_data[]; extern int input_len; - -static long bytes_out = 0; -static uch *output_data; -static unsigned long output_ptr = 0; +static unsigned char *output; static void error(char *m); @@ -93,7 +43,9 @@ static unsigned long free_mem_end_ptr; #define HEAP_SIZE 0x10000 -#include "../../../../lib/inflate.c" +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif #ifdef CONFIG_SH_STANDARD_BIOS size_t strlen(const char *s) @@ -138,44 +90,6 @@ void* memcpy(void* __dest, __const void* __src, return __dest; } -/* =========================================================================== - * Fill the input buffer. This is called only when the buffer is empty - * and at least one byte is really needed. - */ -static int fill_inbuf(void) -{ - if (insize != 0) { - error("ran out of input data"); - } - - inbuf = input_data; - insize = input_len; - inptr = 1; - return inbuf[0]; -} - -/* =========================================================================== - * Write the output window window[0..outcnt-1] and update crc and bytes_out. - * (Used for the decompressed data only.) - */ -static void flush_window(void) -{ - ulg c = crc; /* temporary variable */ - unsigned n; - uch *in, *out, ch; - - in = window; - out = &output_data[output_ptr]; - for (n = 0; n < outcnt; n++) { - ch = *out++ = *in++; - c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (ulg)outcnt; - output_ptr += (ulg)outcnt; - outcnt = 0; -} - static void error(char *x) { puts("\n\n"); @@ -191,16 +105,18 @@ long* stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { - output_data = NULL; - output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE); + unsigned long output_addr; + + output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE); #ifdef CONFIG_29BIT - output_ptr |= P2SEG; + output_addr |= P2SEG; #endif + + output = (unsigned char *)output_addr; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - makecrc(); puts("Uncompressing Linux... 
"); - gunzip(); + decompress(input_data, input_len, NULL, NULL, output, NULL, error); puts("Ok, booting the kernel.\n"); } diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c index 2941657..0c6894e 100644 --- a/arch/sh/boot/compressed/misc_64.c +++ b/arch/sh/boot/compressed/misc_64.c @@ -20,67 +20,18 @@ int cache_control(unsigned int command); * gzip declarations */ -#define OF(args) args #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; - -#define WSIZE 0x8000 /* Window size must be at least 32k, */ - /* and a power of two */ - -static uch *inbuf; /* input buffer */ -static uch window[WSIZE]; /* Sliding window buffer */ - -static unsigned insize = 0; /* valid bytes in inbuf */ -static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ -static unsigned outcnt = 0; /* bytes in output buffer */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ - -#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) - -/* Diagnostic functions */ -#ifdef DEBUG -# define Assert(cond,msg) {if(!(cond)) error(msg);} -# define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ;} -# define Tracevv(x) {if (verbose>1) fprintf x ;} -# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} -# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} -#else -# define Assert(cond,msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) -#endif - -static int fill_inbuf(void); -static void flush_window(void); static void error(char *m); extern char input_data[]; extern int input_len; -static long bytes_out = 0; -static uch *output_data; -static unsigned long output_ptr = 0; - -static void error(char *m); +static unsigned char *output_data; static void puts(const char *); @@ -91,7 +42,9 @@ static unsigned long free_mem_end_ptr; #define HEAP_SIZE 0x10000 -#include "../../../../lib/inflate.c" +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif void puts(const char *s) { @@ -117,45 +70,6 @@ void *memcpy(void *__dest, __const void *__src, size_t __n) return __dest; } -/* =========================================================================== - * Fill the input buffer. This is called only when the buffer is empty - * and at least one byte is really needed. - */ -static int fill_inbuf(void) -{ - if (insize != 0) { - error("ran out of input data\n"); - } - - inbuf = input_data; - insize = input_len; - inptr = 1; - return inbuf[0]; -} - -/* =========================================================================== - * Write the output window window[0..outcnt-1] and update crc and bytes_out. - * (Used for the decompressed data only.) 
- */ -static void flush_window(void) -{ - ulg c = crc; /* temporary variable */ - unsigned n; - uch *in, *out, ch; - - in = window; - out = &output_data[output_ptr]; - for (n = 0; n < outcnt; n++) { - ch = *out++ = *in++; - c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (ulg) outcnt; - output_ptr += (ulg) outcnt; - outcnt = 0; - puts("."); -} - static void error(char *x) { puts("\n\n"); @@ -171,40 +85,13 @@ long *stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { - output_data = (uch *) (CONFIG_MEMORY_START + 0x2000); + output_data = (unsigned char *) (CONFIG_MEMORY_START + 0x2000); free_mem_ptr = (unsigned long) &_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - makecrc(); puts("Uncompressing Linux... "); cache_control(CACHE_ENABLE); - gunzip(); - puts("\n"); - -#if 0 - /* When booting from ROM may want to do something like this if the - * boot loader doesn't. - */ - - /* Set up the parameters and command line */ - { - volatile unsigned int *parambase = - (int *) (CONFIG_MEMORY_START + 0x1000); - - parambase[0] = 0x1; /* MOUNT_ROOT_RDONLY */ - parambase[1] = 0x0; /* RAMDISK_FLAGS */ - parambase[2] = 0x0200; /* ORIG_ROOT_DEV */ - parambase[3] = 0x0; /* LOADER_TYPE */ - parambase[4] = 0x0; /* INITRD_START */ - parambase[5] = 0x0; /* INITRD_SIZE */ - parambase[6] = 0; - - strcpy((char *) ((int) parambase + 0x100), - "console=ttySC0,38400"); - } -#endif - - puts("Ok, booting the kernel.\n"); - + decompress(input_data, input_len, NULL, NULL, output_data, NULL, error); cache_control(CACHE_DISABLE); + puts("Ok, booting the kernel.\n"); } -- cgit v1.1 From 07e88e1bfc128681a80578724fde6a872f413862 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 13:21:19 -0400 Subject: sh: bzip2/lzma zImage support. This plugs in bzip2 and lzma support for zImages. 
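All three decompressors are driven through the single decompress() entry point already used in the previous patches, and it is always called in one-shot mode here: the whole compressed payload is in memory, so the fill and flush callbacks are passed as NULL and only the error callback matters. A hedged userspace sketch of that calling pattern follows; the parameter layout is paraphrased from the call sites above and the names are purely illustrative:

/*
 * Toy stand-in for a decompress()-style entry point, used one-shot.
 * It just copies bytes; the point is the parameter pattern, not the
 * algorithm.
 */
#include <stdio.h>
#include <string.h>

static int toy_decompress(unsigned char *in, int in_len,
			  int (*fill)(void *, unsigned int),
			  int (*flush)(void *, unsigned int),
			  unsigned char *out, int *out_pos,
			  void (*error)(char *msg))
{
	if (!in || !out) {
		error("bad buffers");
		return -1;
	}
	memcpy(out, in, in_len);	/* one-shot: no fill/flush needed */
	if (out_pos)
		*out_pos = in_len;
	return 0;
}

static void die(char *msg)
{
	fprintf(stderr, "%s\n", msg);
}

int main(void)
{
	unsigned char in[] = "compressed-image";
	unsigned char out[sizeof(in)];

	/* Mirrors decompress(input_data, input_len, NULL, NULL, output, NULL, error). */
	toy_decompress(in, sizeof(in), NULL, NULL, out, NULL, die);
	printf("%s\n", out);
	return 0;
}

The larger HEAP_SIZE under CONFIG_HAVE_KERNEL_BZIP2 is there because the bzip2 decompressor needs far more scratch memory than the other two.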
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 2 ++ arch/sh/boot/compressed/.gitignore | 1 + arch/sh/boot/compressed/Makefile | 19 ++++++++++++++++--- arch/sh/boot/compressed/misc_32.c | 14 +++++++++++++- arch/sh/boot/compressed/misc_64.c | 14 +++++++++++++- arch/sh/boot/compressed/piggy.S | 8 -------- arch/sh/boot/compressed/vmlinux.scr | 10 ++++++++++ 7 files changed, 55 insertions(+), 13 deletions(-) create mode 100644 arch/sh/boot/compressed/.gitignore delete mode 100644 arch/sh/boot/compressed/piggy.S create mode 100644 arch/sh/boot/compressed/vmlinux.scr (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9f531ca..c4a955d 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -18,6 +18,8 @@ config SUPERH select HAVE_DMA_API_DEBUG select HAVE_PERF_COUNTERS select HAVE_KERNEL_GZIP + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_LZMA select RTC_LIB select GENERIC_ATOMIC64 help diff --git a/arch/sh/boot/compressed/.gitignore b/arch/sh/boot/compressed/.gitignore new file mode 100644 index 0000000..2374a83 --- /dev/null +++ b/arch/sh/boot/compressed/.gitignore @@ -0,0 +1 @@ +vmlinux.bin.* diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 9531bf1..0a4e7af 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -5,6 +5,7 @@ # targets := vmlinux vmlinux.bin vmlinux.bin.gz \ + vmlinux.bin.bz2 vmlinux.bin.lzma \ head_$(BITS).o misc_$(BITS).o piggy.o OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc_$(BITS).o $(obj)/cache.o @@ -38,10 +39,22 @@ $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE +vmlinux.bin.all-y := $(obj)/vmlinux.bin + +$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE + $(call if_changed,lzma) + +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_LZMA) := lzma OBJCOPYFLAGS += -R .empty_zero_page -$(obj)/piggy.o: $(obj)/piggy.S $(obj)/vmlinux.bin.gz FORCE - $(call if_changed,as_o_S) +LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T + +$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE + $(call if_changed,ld) diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c index 1ab4f49..b86e359 100644 --- a/arch/sh/boot/compressed/misc_32.c +++ b/arch/sh/boot/compressed/misc_32.c @@ -41,12 +41,24 @@ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; -#define HEAP_SIZE 0x10000 +#ifdef CONFIG_HAVE_KERNEL_BZIP2 +#define HEAP_SIZE 0x400000 +#else +#define HEAP_SIZE 0x10000 +#endif #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + #ifdef CONFIG_SH_STANDARD_BIOS size_t strlen(const char *s) { diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c index 0c6894e..09b7b7c 100644 --- a/arch/sh/boot/compressed/misc_64.c +++ b/arch/sh/boot/compressed/misc_64.c @@ -40,12 +40,24 @@ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; -#define HEAP_SIZE 0x10000 +#ifdef CONFIG_HAVE_KERNEL_BZIP2 +#define HEAP_SIZE 0x400000 +#else +#define 
HEAP_SIZE 0x10000 +#endif #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + void puts(const char *s) { } diff --git a/arch/sh/boot/compressed/piggy.S b/arch/sh/boot/compressed/piggy.S deleted file mode 100644 index 5660719..0000000 --- a/arch/sh/boot/compressed/piggy.S +++ /dev/null @@ -1,8 +0,0 @@ - .global input_len, input_data - .data -input_len: - .long input_data_end - input_data -input_data: - .incbin "arch/sh/boot/compressed/vmlinux.bin.gz" -input_data_end: - .end diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr new file mode 100644 index 0000000..f02382a --- /dev/null +++ b/arch/sh/boot/compressed/vmlinux.scr @@ -0,0 +1,10 @@ +SECTIONS +{ + .rodata.compressed : { + input_len = .; + LONG(input_data_end - input_data) input_data = .; + *(.data) + output_len = . - 4; + input_data_end = .; + } +} -- cgit v1.1 From b14c6d428a54fb3235e69fd78fba9080c96645be Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 13:30:38 -0400 Subject: sh: Consolidate the sh64 changes in arch/sh/boot/compressed/misc_32.c This makes some minor changes to misc_32.c so that it can be used by sh64. Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/misc_32.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c index b86e359..4eb27e6 100644 --- a/arch/sh/boot/compressed/misc_32.c +++ b/arch/sh/boot/compressed/misc_32.c @@ -26,7 +26,10 @@ #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) -static void error(char *m); +/* cache.c */ +#define CACHE_ENABLE 0 +#define CACHE_DISABLE 1 +int cache_control(unsigned int command); extern char input_data[]; extern int input_len; @@ -111,9 +114,15 @@ static void error(char *x) while(1); /* Halt */ } +#ifdef CONFIG_SUPERH64 +#define stackalign 8 +#else +#define stackalign 4 +#endif + #define STACK_SIZE (4096) -long user_stack [STACK_SIZE]; -long* stack_start = &user_stack[STACK_SIZE]; +long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE]; +long *stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { @@ -129,6 +138,8 @@ void decompress_kernel(void) free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; puts("Uncompressing Linux... "); + cache_control(CACHE_ENABLE); decompress(input_data, input_len, NULL, NULL, output, NULL, error); + cache_control(CACHE_DISABLE); puts("Ok, booting the kernel.\n"); } -- cgit v1.1 From 59f002964f4e6668a0132cd796b82f7f8a4803f0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 13:32:24 -0400 Subject: sh: rename arch/sh/boot/compressed/misc_32.c -> misc.c This is now used by both sh64 and regular sh, kill off the old sh64 version now too. 
Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/Makefile | 4 +- arch/sh/boot/compressed/misc.c | 145 ++++++++++++++++++++++++++++++++++++++ arch/sh/boot/compressed/misc_32.c | 145 -------------------------------------- arch/sh/boot/compressed/misc_64.c | 109 ---------------------------- 4 files changed, 147 insertions(+), 256 deletions(-) create mode 100644 arch/sh/boot/compressed/misc.c delete mode 100644 arch/sh/boot/compressed/misc_32.c delete mode 100644 arch/sh/boot/compressed/misc_64.c (limited to 'arch') diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 0a4e7af..3324019 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -6,9 +6,9 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \ vmlinux.bin.bz2 vmlinux.bin.lzma \ - head_$(BITS).o misc_$(BITS).o piggy.o + head_$(BITS).o misc.o piggy.o -OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc_$(BITS).o $(obj)/cache.o +OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o ifdef CONFIG_SH_STANDARD_BIOS OBJECTS += $(obj)/../../kernel/sh_bios.o diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c new file mode 100644 index 0000000..4eb27e6 --- /dev/null +++ b/arch/sh/boot/compressed/misc.c @@ -0,0 +1,145 @@ +/* + * arch/sh/boot/compressed/misc.c + * + * This is a collection of several routines from gzip-1.0.3 + * adapted for Linux. + * + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + * + * Adapted for SH by Stuart Menefy, Aug 1999 + * + * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000 + */ + +#include +#include +#include +#include + +/* + * gzip declarations + */ + +#define STATIC static + +#undef memset +#undef memcpy +#define memzero(s, n) memset ((s), 0, (n)) + +/* cache.c */ +#define CACHE_ENABLE 0 +#define CACHE_DISABLE 1 +int cache_control(unsigned int command); + +extern char input_data[]; +extern int input_len; +static unsigned char *output; + +static void error(char *m); + +int puts(const char *); + +extern int _text; /* Defined in vmlinux.lds.S */ +extern int _end; +static unsigned long free_mem_ptr; +static unsigned long free_mem_end_ptr; + +#ifdef CONFIG_HAVE_KERNEL_BZIP2 +#define HEAP_SIZE 0x400000 +#else +#define HEAP_SIZE 0x10000 +#endif + +#ifdef CONFIG_KERNEL_GZIP +#include "../../../../lib/decompress_inflate.c" +#endif + +#ifdef CONFIG_KERNEL_BZIP2 +#include "../../../../lib/decompress_bunzip2.c" +#endif + +#ifdef CONFIG_KERNEL_LZMA +#include "../../../../lib/decompress_unlzma.c" +#endif + +#ifdef CONFIG_SH_STANDARD_BIOS +size_t strlen(const char *s) +{ + int i = 0; + + while (*s++) + i++; + return i; +} + +int puts(const char *s) +{ + int len = strlen(s); + sh_bios_console_write(s, len); + return len; +} +#else +int puts(const char *s) +{ + /* This should be updated to use the sh-sci routines */ + return 0; +} +#endif + +void* memset(void* s, int c, size_t n) +{ + int i; + char *ss = (char*)s; + + for (i=0;i -#include -#include -#include - -/* - * gzip declarations - */ - -#define STATIC static - -#undef memset -#undef memcpy -#define memzero(s, n) memset ((s), 0, (n)) - -/* cache.c */ -#define CACHE_ENABLE 0 -#define CACHE_DISABLE 1 -int cache_control(unsigned int command); - -extern char input_data[]; -extern int input_len; -static unsigned char *output; - -static void error(char *m); - -int puts(const char *); - -extern int _text; /* Defined in vmlinux.lds.S */ -extern int _end; -static unsigned long free_mem_ptr; -static unsigned long free_mem_end_ptr; - -#ifdef CONFIG_HAVE_KERNEL_BZIP2 
-#define HEAP_SIZE 0x400000 -#else -#define HEAP_SIZE 0x10000 -#endif - -#ifdef CONFIG_KERNEL_GZIP -#include "../../../../lib/decompress_inflate.c" -#endif - -#ifdef CONFIG_KERNEL_BZIP2 -#include "../../../../lib/decompress_bunzip2.c" -#endif - -#ifdef CONFIG_KERNEL_LZMA -#include "../../../../lib/decompress_unlzma.c" -#endif - -#ifdef CONFIG_SH_STANDARD_BIOS -size_t strlen(const char *s) -{ - int i = 0; - - while (*s++) - i++; - return i; -} - -int puts(const char *s) -{ - int len = strlen(s); - sh_bios_console_write(s, len); - return len; -} -#else -int puts(const char *s) -{ - /* This should be updated to use the sh-sci routines */ - return 0; -} -#endif - -void* memset(void* s, int c, size_t n) -{ - int i; - char *ss = (char*)s; - - for (i=0;i - -/* cache.c */ -#define CACHE_ENABLE 0 -#define CACHE_DISABLE 1 -int cache_control(unsigned int command); - -/* - * gzip declarations - */ - -#define STATIC static - -#undef memset -#undef memcpy -#define memzero(s, n) memset ((s), 0, (n)) - -static void error(char *m); - -extern char input_data[]; -extern int input_len; - -static unsigned char *output_data; - -static void puts(const char *); - -extern int _text; /* Defined in vmlinux.lds.S */ -extern int _end; -static unsigned long free_mem_ptr; -static unsigned long free_mem_end_ptr; - -#ifdef CONFIG_HAVE_KERNEL_BZIP2 -#define HEAP_SIZE 0x400000 -#else -#define HEAP_SIZE 0x10000 -#endif - -#ifdef CONFIG_KERNEL_GZIP -#include "../../../../lib/decompress_inflate.c" -#endif - -#ifdef CONFIG_KERNEL_BZIP2 -#include "../../../../lib/decompress_bunzip2.c" -#endif - -#ifdef CONFIG_KERNEL_LZMA -#include "../../../../lib/decompress_unlzma.c" -#endif - -void puts(const char *s) -{ -} - -void *memset(void *s, int c, size_t n) -{ - int i; - char *ss = (char *) s; - - for (i = 0; i < n; i++) - ss[i] = c; - return s; -} - -void *memcpy(void *__dest, __const void *__src, size_t __n) -{ - int i; - char *d = (char *) __dest, *s = (char *) __src; - - for (i = 0; i < __n; i++) - d[i] = s[i]; - return __dest; -} - -static void error(char *x) -{ - puts("\n\n"); - puts(x); - puts("\n\n -- System halted"); - - while (1) ; /* Halt */ -} - -#define STACK_SIZE (4096) -long __attribute__ ((aligned(8))) user_stack[STACK_SIZE]; -long *stack_start = &user_stack[STACK_SIZE]; - -void decompress_kernel(void) -{ - output_data = (unsigned char *) (CONFIG_MEMORY_START + 0x2000); - free_mem_ptr = (unsigned long) &_end; - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - - puts("Uncompressing Linux... "); - cache_control(CACHE_ENABLE); - decompress(input_data, input_len, NULL, NULL, output_data, NULL, error); - cache_control(CACHE_DISABLE); - puts("Ok, booting the kernel.\n"); -} -- cgit v1.1 From 040f43e0bf70935cbe8a775110206d11367e11db Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 13:36:25 -0400 Subject: sh64: Don't use PHYSADDR() for output_addr calculation. Opencode the MEMORY_START offset directly, sh64 uses a slightly different calculation. 
Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/misc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index 4eb27e6..fd56a71 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c @@ -128,10 +128,14 @@ void decompress_kernel(void) { unsigned long output_addr; +#ifdef CONFIG_SUPERH64 + output_addr = (CONFIG_MEMORY_START + 0x2000); +#else output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE); #ifdef CONFIG_29BIT output_addr |= P2SEG; #endif +#endif output = (unsigned char *)output_addr; free_mem_ptr = (unsigned long)&_end; -- cgit v1.1 From 05dd2cd3bb3299540e33ff60c5b401dd88f273bd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 13 Jul 2009 11:38:04 +0000 Subject: sh: Restore previous behaviour on kernel fault The last commit changed the behaviour on kernel faults when we were doing something other than syncing the page tables. vmalloc_sync_one() needs to return NULL if the page tables are up to date, because the reason for the fault was not a missing/inconsitent page table entry. By returning NULL if the page tables are sync'd we signal to the calling function that further work must be done to resolve this fault. Also, remove the superfluous __va() around the first argument to vmalloc_sync_one(). The value of pgd_k is already a virtual address and using it wth __va() causes a NULL dereference. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/fault_32.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 08d0117..dbbdeba 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -60,8 +60,15 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) if (!pmd_present(*pmd)) set_pmd(pmd, *pmd_k); - else + else { + /* + * The page tables are fully synchronised so there must + * be another reason for the fault. Return NULL here to + * signal that we have not taken care of the fault. + */ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + return NULL; + } return pmd_k; } @@ -87,7 +94,7 @@ static noinline int vmalloc_fault(unsigned long address) * an interrupt in the middle of a task switch.. */ pgd_k = get_TTB(); - pmd_k = vmalloc_sync_one(__va((unsigned long)pgd_k), address); + pmd_k = vmalloc_sync_one(pgd_k, address); if (!pmd_k) return -1; -- cgit v1.1 From 72849873cdf213b5d7b8ae006e6740b2d78f6ad8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 14 Jul 2009 06:51:54 -0400 Subject: sh: Kill off zero-sized vmlinux_64.lds.S This seems to be some merge damage, kill it off. Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux_64.lds.S | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 arch/sh/kernel/vmlinux_64.lds.S (limited to 'arch') diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S deleted file mode 100644 index e69de29..0000000 -- cgit v1.1 From 872fb6dd6b07986417964e089074e7acfd025f4c Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 13 Jul 2009 13:09:43 -0700 Subject: ia64: Fix setup_per_cpu_areas() compilation error Fix ia64 build setup_per_cpu_areas() redifinition issue in UP configuration. 
When compiling ia64 kernel in UP configuration, the following compilation errors are reported: arch/ia64/kernel/setup.c:860: error: redefinition of 'setup_per_cpu_areas' include/linux/percpu.h:185: error: previous definition of 'setup_per_cpu_areas' was here The patch fixes the issue in arch/ia64/kernel/setup.c Signed-off-by: Fenghua Yu Signed-off-by: Tejun Heo --- arch/ia64/kernel/setup.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 1b23ec12..1de86c9 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -855,11 +855,17 @@ identify_cpu (struct cpuinfo_ia64 *c) c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); } +/* + * In UP configuration, setup_per_cpu_areas() is defined in + * include/linux/percpu.h + */ +#ifdef CONFIG_SMP void __init setup_per_cpu_areas (void) { /* start_kernel() requires this... */ } +#endif /* * Do the following calculations: -- cgit v1.1 From 0f8ee1874fa80899debc0a0670e2bed0a28d2548 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:24:55 +0000 Subject: sh: Add support for multiple hwblk counters Extend the SuperH hwblk code to support more than one counter. Contains ground work for the future Runtime PM implementation. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/hwblk.h | 13 ++++++- arch/sh/kernel/cpu/hwblk.c | 69 +++++++++++++++++++++++----------- arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 4 +- 3 files changed, 60 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h index 51a46f4..c01d72c 100644 --- a/arch/sh/include/asm/hwblk.h +++ b/arch/sh/include/asm/hwblk.h @@ -4,6 +4,9 @@ #include #include +#define HWBLK_CNT_USAGE 0 +#define HWBLK_CNT_NR 1 + #define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ #define HWBLK_AREA(_flags, _parent) \ @@ -13,7 +16,7 @@ } struct hwblk_area { - unsigned long cnt; + int cnt[HWBLK_CNT_NR]; unsigned char parent; unsigned char flags; }; @@ -29,7 +32,7 @@ struct hwblk { void __iomem *mstp; unsigned char bit; unsigned char area; - unsigned long cnt; + int cnt[HWBLK_CNT_NR]; }; struct hwblk_info { @@ -46,6 +49,12 @@ int arch_hwblk_sleep_mode(void); int hwblk_register(struct hwblk_info *info); int hwblk_init(void); +void hwblk_enable(struct hwblk_info *info, int hwblk); +void hwblk_disable(struct hwblk_info *info, int hwblk); + +void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt); +void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt); + /* allow clocks to enable and disable hardware blocks */ #define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \ { \ diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c index 7c3a73d..c0ad7d4 100644 --- a/arch/sh/kernel/cpu/hwblk.c +++ b/arch/sh/kernel/cpu/hwblk.c @@ -9,38 +9,64 @@ static DEFINE_SPINLOCK(hwblk_lock); -static void hwblk_area_inc(struct hwblk_info *info, int area) +static void hwblk_area_mod_cnt(struct hwblk_info *info, + int area, int counter, int value, int goal) { struct hwblk_area *hap = info->areas + area; - hap->cnt++; - if (hap->cnt == 1) - if (hap->flags & HWBLK_AREA_FLAG_PARENT) - hwblk_area_inc(info, hap->parent); + hap->cnt[counter] += value; + + if (hap->cnt[counter] != goal) + return; + + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_mod_cnt(info, hap->parent, counter, value, goal); } -static void hwblk_area_dec(struct hwblk_info *info, int area) + +static int __hwblk_mod_cnt(struct 
hwblk_info *info, int hwblk, + int counter, int value, int goal) { - struct hwblk_area *hap = info->areas + area; + struct hwblk *hp = info->hwblks + hwblk; + + hp->cnt[counter] += value; + if (hp->cnt[counter] == goal) + hwblk_area_mod_cnt(info, hp->area, counter, value, goal); - if (hap->cnt == 1) - if (hap->flags & HWBLK_AREA_FLAG_PARENT) - hwblk_area_dec(info, hap->parent); - hap->cnt--; + return hp->cnt[counter]; } -static void hwblk_enable(struct hwblk_info *info, int hwblk) +static void hwblk_mod_cnt(struct hwblk_info *info, int hwblk, + int counter, int value, int goal) +{ + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + __hwblk_mod_cnt(info, hwblk, counter, value, goal); + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int counter) +{ + hwblk_mod_cnt(info, hwblk, counter, 1, 1); +} + +void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int counter) +{ + hwblk_mod_cnt(info, hwblk, counter, -1, 0); +} + +void hwblk_enable(struct hwblk_info *info, int hwblk) { struct hwblk *hp = info->hwblks + hwblk; unsigned long tmp; unsigned long flags; + int ret; spin_lock_irqsave(&hwblk_lock, flags); - hp->cnt++; - if (hp->cnt == 1) { - hwblk_area_inc(info, hp->area); - + ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, 1, 1); + if (ret == 1) { tmp = __raw_readl(hp->mstp); tmp &= ~(1 << hp->bit); __raw_writel(tmp, hp->mstp); @@ -49,27 +75,26 @@ static void hwblk_enable(struct hwblk_info *info, int hwblk) spin_unlock_irqrestore(&hwblk_lock, flags); } -static void hwblk_disable(struct hwblk_info *info, int hwblk) +void hwblk_disable(struct hwblk_info *info, int hwblk) { struct hwblk *hp = info->hwblks + hwblk; unsigned long tmp; unsigned long flags; + int ret; spin_lock_irqsave(&hwblk_lock, flags); - if (hp->cnt == 1) { - hwblk_area_dec(info, hp->area); - + ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, -1, 0); + if (ret == 0) { tmp = __raw_readl(hp->mstp); tmp |= 1 << hp->bit; __raw_writel(tmp, hp->mstp); } - hp->cnt--; spin_unlock_irqrestore(&hwblk_lock, flags); } -static struct hwblk_info *hwblk_info; +struct hwblk_info *hwblk_info; int __init hwblk_register(struct hwblk_info *info) { diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c index 00a1c02..a288b5d 100644 --- a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c @@ -91,10 +91,10 @@ static struct hwblk_info sh7722_hwblk_info = { int arch_hwblk_sleep_mode(void) { - if (!sh7722_hwblk_area[CORE_AREA].cnt) + if (!sh7722_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) return SUSP_SH_STANDBY | SUSP_SH_SF; - if (!sh7722_hwblk_area[CORE_AREA_BM].cnt) + if (!sh7722_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) return SUSP_SH_SLEEP | SUSP_SH_SF; return SUSP_SH_SLEEP; -- cgit v1.1 From 2094e504a7489a2d774d544592360e578d9325f8 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:43:38 +0000 Subject: sh: hwblk support for sh7723 This patch adds hwblk support for the sh7723 processor. 
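The sh7723 tables plug into the counter scheme introduced by the previous patch: every hardware block and every power area now carries an array of counters, and __hwblk_mod_cnt() only propagates a change up to the enclosing area when the per-block counter reaches the requested goal, so an increment with goal 1 marks the first user of a block and a decrement with goal 0 marks the last. A minimal sketch of how a caller is expected to drive the usage counter follows; it is not part of either patch, and "info" and HWBLK_SDHI0 merely stand in for a registered struct hwblk_info and one of the per-SoC block ids:

#include <asm/hwblk.h>

static void hwblk_usage_sketch(struct hwblk_info *info)
{
	/* first user: clears the MSTP bit and counts both the block and,
	 * transitively, its power area */
	hwblk_enable(info, HWBLK_SDHI0);

	/* the generic helpers work on any counter index; only
	 * HWBLK_CNT_USAGE exists so far, more are expected for Runtime PM */
	hwblk_cnt_inc(info, HWBLK_SDHI0, HWBLK_CNT_USAGE);
	hwblk_cnt_dec(info, HWBLK_SDHI0, HWBLK_CNT_USAGE);

	/* last user: sets the MSTP bit again and lets the area counter
	 * drop, so arch_hwblk_sleep_mode() may pick a deeper sleep state */
	hwblk_disable(info, HWBLK_SDHI0);
}
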
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7723.h | 17 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 110 ++++++++++++++++--------------- arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c | 117 +++++++++++++++++++++++++++++++++ 4 files changed, 193 insertions(+), 53 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c (limited to 'arch') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7723.h b/arch/sh/include/cpu-sh4/cpu/sh7723.h index 14c8ca9..9b36fae 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7723.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7723.h @@ -265,4 +265,21 @@ enum { GPIO_FN_IDEA1, GPIO_FN_IDEA0, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_L2C, HWBLK_ILMEM, HWBLK_FPU, + HWBLK_INTC, HWBLK_DMAC0, HWBLK_SHYWAY, + HWBLK_HUDI, HWBLK_DBG, HWBLK_UBC, HWBLK_SUBC, + HWBLK_TMU0, HWBLK_CMT, HWBLK_RWDT, HWBLK_DMAC1, HWBLK_TMU1, + HWBLK_FLCTL, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, + HWBLK_SCIF3, HWBLK_SCIF4, HWBLK_SCIF5, + HWBLK_MSIOF0, HWBLK_MSIOF1, HWBLK_MERAM, HWBLK_IIC, HWBLK_RTC, + HWBLK_ATAPI, HWBLK_ADC, HWBLK_TPU, HWBLK_IRDA, HWBLK_TSIF, HWBLK_ICB, + HWBLK_SDHI0, HWBLK_SDHI1, HWBLK_KEYSC, HWBLK_USB, + HWBLK_2DG, HWBLK_SIU, HWBLK_VEU2H1, HWBLK_VOU, HWBLK_BEU, HWBLK_CEU, + HWBLK_VEU2H0, HWBLK_VPU, HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7723_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 3cafda6..1d7ae38 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -26,7 +26,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o -clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o +clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index e67c267..bf64c78 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7723 registers */ #define FRQCR 0xa4150000 @@ -140,60 +142,64 @@ struct clk div6_clks[] = { SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) +#define R_CLK (&r_clk) +#define P_CLK (&div4_clks[DIV4_P]) +#define B_CLK (&div4_clks[DIV4_B]) +#define U_CLK (&div4_clks[DIV4_U]) +#define I_CLK (&div4_clks[DIV4_I]) +#define SH_CLK (&div4_clks[DIV4_SH]) static struct clk mstp_clks[] = { /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ - MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), - MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), - MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), - MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 28, 1, 1, 0), - MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), - MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), - MSTP("intc0", &div4_clks[DIV4_I], MSTPCR0, 22, 1, 1, 0), - MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), - MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), - MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 
19, 0, 1, 0), - MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), - MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), - MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 11, 0, 1, 0), - MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), - MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), - MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), - MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), - MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), - MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), - MSTP("meram0", &div4_clks[DIV4_SH], MSTPCR0, 0, 1, 1, 0), - - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 8, 0, 0, 0), - - MSTP("atapi0", &div4_clks[DIV4_SH], MSTPCR2, 28, 0, 1, 0), - MSTP("adc0", &div4_clks[DIV4_P], MSTPCR2, 27, 0, 1, 0), - MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), - MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), - MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), - MSTP("icb0", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), - MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), - MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), - MSTP("keysc0", &r_clk, MSTPCR2, 14, 0, 0, 0), - MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 11, 0, 1, 0), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 10, 0, 1, 1), - MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0, 1, 0), - MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), + SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0), + SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0), + SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0), + SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0), + SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0), + SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0), + SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0), + SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0), + SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0), + 
SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0), + + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + + SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0), + SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0), + SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0), + SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0), + SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0), + SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), + SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -216,7 +222,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c new file mode 100644 index 0000000..a7f4684 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c @@ -0,0 +1,117 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c + * + * SH7723 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7723 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7723 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7723_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7723_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_L2C] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA), + [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_SUBC] = HWBLK(MSTPCR0, 16, CORE_AREA), + [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM), + [HWBLK_TMU1] = HWBLK(MSTPCR0, 11, CORE_AREA), + [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA), + [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + [HWBLK_MERAM] = HWBLK(MSTPCR0, 0, CORE_AREA), + + [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA), + + [HWBLK_ATAPI] = HWBLK(MSTPCR2, 28, CORE_AREA_BM), + [HWBLK_ADC] = HWBLK(MSTPCR2, 27, CORE_AREA), + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA), + [HWBLK_ICB] = HWBLK(MSTPCR2, 21, CORE_AREA_BM), + [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA), + [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA), + [HWBLK_USB] = HWBLK(MSTPCR2, 11, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 10, CORE_AREA_BM), + [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA), + [HWBLK_VEU2H1] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU2H0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7723_hwblk_info = { + .areas = sh7723_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7723_hwblk_area), + .hwblks = sh7723_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7723_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7723_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7723_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) + return 
SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7723_hwblk_info); +} -- cgit v1.1 From 719a72b7c75bb239ca6184190ab994b71a31c6dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:59:55 +0000 Subject: usb: r8a66597-hcd platform data on_chip support Convert the r8a66597-hcd driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_ON_CHIP_R8A66597 ugliness. This makes the code cleaner and also allows us to support both external and internal r8a66597 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip r8a66597 without modifying the Kconfig. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 1 + arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 8fed45a..4fb7e48 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -304,6 +304,7 @@ static struct platform_device sh_eth_device = { }; static struct r8a66597_platdata sh7724_usb0_host_data = { + .on_chip = 1, }; static struct resource sh7724_usb0_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index c18f7d0..f6d2088 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -40,7 +40,7 @@ static struct platform_device iic_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource usb_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index e1bb80b..2851649 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -398,7 +398,7 @@ static struct platform_device rtc_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource sh7723_usb_host_resources[] = { -- cgit v1.1 From 6bde607e699b9f3c6fa5e4a97d78c7778e643b3d Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Mon, 20 Jul 2009 23:30:24 +0900 Subject: sh: Clean up linker script using new BSS_SECTION macro. Updated to use the fixed BSS linker script macros from this thread: http://www.spinics.net/lists/kernel/msg913238.html Signed-off-by: Tim Abbott Cc: Sam Ravnborg Cc: linux-sh@vger.kernel.org Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux.lds.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 674ed8f..80dc9f8 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -93,8 +93,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; - BSS(PAGE_SIZE) - . = ALIGN(4); + BSS_SECTION(0, PAGE_SIZE, 4) _ebss = .; /* uClinux MTD sucks */ _end = . ; -- cgit v1.1 From ef9b542fce00dafc6bb1d9097b045a777f4a2382 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 21 Jul 2009 17:24:36 +0900 Subject: sh: bzip2/lzma uImage support. This builds on the bzip2/lzma zImage support change and wires it up for uImages. Based on the blackfin implementation. 
Signed-off-by: Paul Mundt --- arch/sh/Makefile | 15 ++++++++++----- arch/sh/boot/.gitignore | 5 ++--- arch/sh/boot/Makefile | 39 ++++++++++++++++++++++++++++----------- arch/sh/boot/compressed/Makefile | 4 ---- 4 files changed, 40 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 75d049b0..2aba73e 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -189,14 +189,16 @@ KBUILD_AFLAGS += $(cflags-y) libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) -PHONY += maketools FORCE +BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.srec \ + zImage vmlinux.srec +PHONY += maketools $(BOOT_TARGETS) FORCE maketools: include/linux/version.h FORCE $(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h all: $(KBUILD_IMAGE) -zImage uImage uImage.srec vmlinux.srec: vmlinux +$(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ compressed: zImage @@ -208,10 +210,13 @@ archclean: $(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall define archhelp - @echo '* zImage - Compressed kernel image' + @echo ' zImage - Compressed kernel image' @echo ' vmlinux.srec - Create an ELF S-record' - @echo ' uImage - Create a bootable image for U-Boot' - @echo ' uImage.srec - Create an S-record for U-Boot' + @echo '* uImage - Alias to bootable U-Boot image' + @echo ' uImage.srec - Create an S-record for U-Boot' + @echo '* uImage.gz - Kernel-only image for U-Boot (gzip)' + @echo ' uImage.bz2 - Kernel-only image for U-Boot (bzip2)' + @echo ' uImage.lzma - Kernel-only image for U-Boot (lzma)' endef CLEAN_FILES += include/asm-sh/machtypes.h diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore index aad5edd..541087d 100644 --- a/arch/sh/boot/.gitignore +++ b/arch/sh/boot/.gitignore @@ -1,4 +1,3 @@ zImage -vmlinux.srec -uImage -uImage.srec +vmlinux* +uImage* diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index 78efb04..dd2a852 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -20,7 +20,12 @@ CONFIG_BOOT_LINK_OFFSET ?= 0x00800000 CONFIG_ZERO_PAGE_OFFSET ?= 0x00001000 CONFIG_ENTRY_OFFSET ?= 0x00001000 -targets := zImage vmlinux.srec uImage uImage.srec +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_LZMA) := lzma + +targets := zImage vmlinux.srec uImage uImage.srec uImage.gz uImage.bz2 uImage.lzma +extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma subdir- := compressed $(obj)/zImage: $(obj)/compressed/vmlinux FORCE @@ -40,9 +45,6 @@ KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_MEMORY_START)]') endif -export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \ - CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET KERNEL_MEMORY - KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ $(KERNEL_MEMORY) + \ @@ -55,19 +57,30 @@ KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \ quiet_cmd_uimage = UIMAGE $@ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \ - -C gzip -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \ + -C $(2) -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \ -n 'Linux-$(KERNELRELEASE)' -d $< $@ -$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE - $(call if_changed,uimage) - @echo ' Image $@ is ready' - $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + +$(obj)/vmlinux.bin.lzma: 
$(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 + $(call if_changed,uimage,bzip2) + +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz + $(call if_changed,uimage,gzip) + +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma + $(call if_changed,uimage,lzma) + OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec $(obj)/vmlinux.srec: $(obj)/compressed/vmlinux $(call if_changed,objcopy) @@ -76,5 +89,9 @@ OBJCOPYFLAGS_uImage.srec := -I binary -O srec $(obj)/uImage.srec: $(obj)/uImage $(call if_changed,objcopy) -clean-files += uImage uImage.srec vmlinux.srec \ - vmlinux.bin vmlinux.bin.gz +$(obj)/uImage: $(obj)/uImage.$(suffix-y) + @ln -sf $(notdir $<) $@ + @echo ' Image $@ is ready' + +export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \ + CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET KERNEL_MEMORY suffix-y diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 3324019..b31618e 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -48,10 +48,6 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzma) -suffix-$(CONFIG_KERNEL_GZIP) := gz -suffix-$(CONFIG_KERNEL_BZIP2) := bz2 -suffix-$(CONFIG_KERNEL_LZMA) := lzma - OBJCOPYFLAGS += -R .empty_zero_page LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T -- cgit v1.1 From 3162534069597e34dd0ac9eb711be8dc23835ae7 Mon Sep 17 00:00:00 2001 From: Joseph Cihula Date: Tue, 30 Jun 2009 19:30:59 -0700 Subject: x86, intel_txt: Intel TXT boot support This patch adds kernel configuration and boot support for Intel Trusted Execution Technology (Intel TXT). Intel's technology for safer computing, Intel Trusted Execution Technology (Intel TXT), defines platform-level enhancements that provide the building blocks for creating trusted platforms. Intel TXT was formerly known by the code name LaGrande Technology (LT). Intel TXT in Brief: o Provides dynamic root of trust for measurement (DRTM) o Data protection in case of improper shutdown o Measurement and verification of launched environment Intel TXT is part of the vPro(TM) brand and is also available some non-vPro systems. It is currently available on desktop systems based on the Q35, X38, Q45, and Q43 Express chipsets (e.g. Dell Optiplex 755, HP dc7800, etc.) and mobile systems based on the GM45, PM45, and GS45 Express chipsets. For more information, see http://www.intel.com/technology/security/. This site also has a link to the Intel TXT MLE Developers Manual, which has been updated for the new released platforms. A much more complete description of how these patches support TXT, how to configure a system for it, etc. is in the Documentation/intel_txt.txt file in this patch. This patch provides the TXT support routines for complete functionality, documentation for TXT support and for the changes to the boot_params structure, and boot detection of a TXT launch. Attempts to shutdown (reboot, Sx) the system will result in platform resets; subsequent patches will support these shutdown modes properly. 
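The interface the rest of the kernel sees is intentionally small: tboot_probe() fills in a global tboot pointer from boot_params.tboot_addr during setup_arch(), tboot_enabled() reports whether a measured launch happened, and tboot_shutdown() hands control back to tboot; when CONFIG_INTEL_TXT is off the header below provides empty stubs. A condensed sketch of a consumer, assuming the header is reachable as <asm/tboot.h> as in this patch (the function name is illustrative; the real reboot/Sx hooks arrive in the follow-up patches):

#include <asm/tboot.h>

static void example_pre_reset(void)
{
	if (!tboot_enabled())	/* no measured launch: nothing to do */
		return;

	/* let tboot scrub secrets and clear the secrets-in-memory flag
	 * before the platform actually resets */
	tboot_shutdown(TB_SHUTDOWN_REBOOT);
}
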
Documentation/intel_txt.txt | 210 +++++++++++++++++++++ Documentation/x86/zero-page.txt | 1 arch/x86/include/asm/bootparam.h | 3 arch/x86/include/asm/fixmap.h | 3 arch/x86/include/asm/tboot.h | 197 ++++++++++++++++++++ arch/x86/kernel/Makefile | 1 arch/x86/kernel/setup.c | 4 arch/x86/kernel/tboot.c | 379 +++++++++++++++++++++++++++++++++++++++ security/Kconfig | 30 +++ 9 files changed, 827 insertions(+), 1 deletion(-) Signed-off-by: Joseph Cihula Signed-off-by: Shane Wang Signed-off-by: Gang Wei Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/bootparam.h | 3 +- arch/x86/include/asm/fixmap.h | 3 + arch/x86/include/asm/tboot.h | 197 ++++++++++++++++++++ arch/x86/kernel/Makefile | 1 + arch/x86/kernel/setup.c | 4 + arch/x86/kernel/tboot.c | 379 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 586 insertions(+), 1 deletion(-) create mode 100644 arch/x86/include/asm/tboot.h create mode 100644 arch/x86/kernel/tboot.c (limited to 'arch') diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h index 1724e8d..6ca2021 100644 --- a/arch/x86/include/asm/bootparam.h +++ b/arch/x86/include/asm/bootparam.h @@ -85,7 +85,8 @@ struct efi_info { struct boot_params { struct screen_info screen_info; /* 0x000 */ struct apm_bios_info apm_bios_info; /* 0x040 */ - __u8 _pad2[12]; /* 0x054 */ + __u8 _pad2[4]; /* 0x054 */ + __u64 tboot_addr; /* 0x058 */ struct ist_info ist_info; /* 0x060 */ __u8 _pad3[16]; /* 0x070 */ __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 7b2d71d..14f9890 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -132,6 +132,9 @@ enum fixed_addresses { #ifdef CONFIG_X86_32 FIX_WP_TEST, #endif +#ifdef CONFIG_INTEL_TXT + FIX_TBOOT_BASE, +#endif __end_of_fixed_addresses }; diff --git a/arch/x86/include/asm/tboot.h b/arch/x86/include/asm/tboot.h new file mode 100644 index 0000000..b13929d --- /dev/null +++ b/arch/x86/include/asm/tboot.h @@ -0,0 +1,197 @@ +/* + * tboot.h: shared data structure with tboot and kernel and functions + * used by kernel for runtime support of Intel(R) Trusted + * Execution Technology + * + * Copyright (c) 2006-2009, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef _ASM_TBOOT_H +#define _ASM_TBOOT_H + +#include + +/* these must have the values from 0-5 in this order */ +enum { + TB_SHUTDOWN_REBOOT = 0, + TB_SHUTDOWN_S5, + TB_SHUTDOWN_S4, + TB_SHUTDOWN_S3, + TB_SHUTDOWN_HALT, + TB_SHUTDOWN_WFS +}; + +#ifdef CONFIG_INTEL_TXT + +/* used to communicate between tboot and the launched kernel */ + +#define TB_KEY_SIZE 64 /* 512 bits */ + +#define MAX_TB_MAC_REGIONS 32 + +struct tboot_mac_region { + u64 start; /* must be 64 byte -aligned */ + u32 size; /* must be 64 byte -granular */ +} __packed; + +/* GAS - Generic Address Structure (ACPI 2.0+) */ +struct tboot_acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __packed; + +/* + * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec + * (http://www.acpi.info/) + */ +struct tboot_acpi_sleep_info { + struct tboot_acpi_generic_address pm1a_cnt_blk; + struct tboot_acpi_generic_address pm1b_cnt_blk; + struct tboot_acpi_generic_address pm1a_evt_blk; + struct tboot_acpi_generic_address pm1b_evt_blk; + u16 pm1a_cnt_val; + u16 pm1b_cnt_val; + u64 wakeup_vector; + u32 vector_width; + u64 kernel_s3_resume_vector; +} __packed; + +/* + * shared memory page used for communication between tboot and kernel + */ +struct tboot { + /* + * version 3+ fields: + */ + + /* TBOOT_UUID */ + u8 uuid[16]; + + /* version number: 5 is current */ + u32 version; + + /* physical addr of tb_log_t log */ + u32 log_addr; + + /* + * physical addr of entry point for tboot shutdown and + * type of shutdown (TB_SHUTDOWN_*) being requested + */ + u32 shutdown_entry; + u32 shutdown_type; + + /* kernel-specified ACPI info for Sx shutdown */ + struct tboot_acpi_sleep_info acpi_sinfo; + + /* tboot location in memory (physical) */ + u32 tboot_base; + u32 tboot_size; + + /* memory regions (phys addrs) for tboot to MAC on S3 */ + u8 num_mac_regions; + struct tboot_mac_region mac_regions[MAX_TB_MAC_REGIONS]; + + + /* + * version 4+ fields: + */ + + /* symmetric key for use by kernel; will be encrypted on S3 */ + u8 s3_key[TB_KEY_SIZE]; + + + /* + * version 5+ fields: + */ + + /* used to 4byte-align num_in_wfs */ + u8 reserved_align[3]; + + /* number of processors in wait-for-SIPI */ + u32 num_in_wfs; +} __packed; + +/* + * UUID for tboot data struct to facilitate matching + * defined as {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} by tboot, which is + * represented as {} in the char array used here + */ +#define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\ + 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8} + +extern struct tboot *tboot; + +static inline int tboot_enabled(void) +{ + return tboot != NULL; +} + +extern void tboot_probe(void); +extern void tboot_create_trampoline(void); +extern void tboot_shutdown(u32 shutdown_type); +extern void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control); +extern int tboot_wait_for_aps(int num_aps); +extern struct acpi_table_header *tboot_get_dmar_table( + struct acpi_table_header *dmar_tbl); +extern int tboot_force_iommu(void); + +#else /* CONFIG_INTEL_TXT */ + +static inline int tboot_enabled(void) +{ + return 0; +} + +static inline void tboot_probe(void) +{ +} + +static inline void tboot_create_trampoline(void) +{ +} + +static inline void tboot_shutdown(u32 shutdown_type) +{ +} + +static inline void tboot_sleep(u8 sleep_state, u32 pm1a_control, + u32 pm1b_control) +{ +} + +static inline int tboot_wait_for_aps(int num_aps) +{ + return 0; +} + +static inline struct acpi_table_header *tboot_get_dmar_table( + 
struct acpi_table_header *dmar_tbl) +{ + return dmar_tbl; +} + +static inline int tboot_force_iommu(void) +{ + return 0; +} + +#endif /* !CONFIG_INTEL_TXT */ + +#endif /* _ASM_TBOOT_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 430d5b2..832cb83 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_X86_DS_SELFTEST) += ds_selftest.o obj-$(CONFIG_X86_32) += tls.o obj-$(CONFIG_IA32_EMULATION) += tls.o obj-y += step.o +obj-$(CONFIG_INTEL_TXT) += tboot.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += cpu/ obj-y += acpi/ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index de2cab1..80d6e9e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -145,6 +145,8 @@ struct boot_params __initdata boot_params; struct boot_params boot_params; #endif +#include + /* * Machine setup.. */ @@ -964,6 +966,8 @@ void __init setup_arch(char **cmdline_p) paravirt_pagetable_setup_done(swapper_pg_dir); paravirt_post_allocator_init(); + tboot_probe(); + #ifdef CONFIG_X86_64 map_vsyscall(); #endif diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c new file mode 100644 index 0000000..263591a --- /dev/null +++ b/arch/x86/kernel/tboot.c @@ -0,0 +1,379 @@ +/* + * tboot.c: main implementation of helper functions used by kernel for + * runtime support of Intel(R) Trusted Execution Technology + * + * Copyright (c) 2006-2009, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "acpi/realmode/wakeup.h" + +/* Global pointer to shared data; NULL means no measured launch. */ +struct tboot *tboot __read_mostly; + +/* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */ +#define AP_WAIT_TIMEOUT 1 + +#undef pr_fmt +#define pr_fmt(fmt) "tboot: " fmt + +static u8 tboot_uuid[16] __initdata = TBOOT_UUID; + +void __init tboot_probe(void) +{ + /* Look for valid page-aligned address for shared page. */ + if (!boot_params.tboot_addr) + return; + /* + * also verify that it is mapped as we expect it before calling + * set_fixmap(), to reduce chance of garbage value causing crash + */ + if (!e820_any_mapped(boot_params.tboot_addr, + boot_params.tboot_addr, E820_RESERVED)) { + pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n"); + return; + } + + /* only a natively booted kernel should be using TXT */ + if (paravirt_enabled()) { + pr_warning("non-0 tboot_addr but pv_ops is enabled\n"); + return; + } + + /* Map and check for tboot UUID. 
*/ + set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr); + tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE); + if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) { + pr_warning("tboot at 0x%llx is invalid\n", + boot_params.tboot_addr); + tboot = NULL; + return; + } + if (tboot->version < 5) { + pr_warning("tboot version is invalid: %u\n", tboot->version); + tboot = NULL; + return; + } + + pr_info("found shared page at phys addr 0x%llx:\n", + boot_params.tboot_addr); + pr_debug("version: %d\n", tboot->version); + pr_debug("log_addr: 0x%08x\n", tboot->log_addr); + pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); + pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); + pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); +} + +static pgd_t *tboot_pg_dir; +static struct mm_struct tboot_mm = { + .mm_rb = RB_ROOT, + .pgd = swapper_pg_dir, + .mm_users = ATOMIC_INIT(2), + .mm_count = ATOMIC_INIT(1), + .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), + .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), + .mmlist = LIST_HEAD_INIT(init_mm.mmlist), + .cpu_vm_mask = CPU_MASK_ALL, +}; + +static inline void switch_to_tboot_pt(void) +{ + write_cr3(virt_to_phys(tboot_pg_dir)); +} + +static int map_tboot_page(unsigned long vaddr, unsigned long pfn, + pgprot_t prot) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_offset(&tboot_mm, vaddr); + pud = pud_alloc(&tboot_mm, pgd, vaddr); + if (!pud) + return -1; + pmd = pmd_alloc(&tboot_mm, pud, vaddr); + if (!pmd) + return -1; + pte = pte_alloc_map(&tboot_mm, pmd, vaddr); + if (!pte) + return -1; + set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); + pte_unmap(pte); + return 0; +} + +static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn, + unsigned long nr) +{ + /* Reuse the original kernel mapping */ + tboot_pg_dir = pgd_alloc(&tboot_mm); + if (!tboot_pg_dir) + return -1; + + for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) { + if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC)) + return -1; + } + + return 0; +} + +void tboot_create_trampoline(void) +{ + u32 map_base, map_size; + + if (!tboot_enabled()) + return; + + /* Create identity map for tboot shutdown code. 
*/ + map_base = PFN_DOWN(tboot->tboot_base); + map_size = PFN_UP(tboot->tboot_size); + if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size)) + panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n", map_base, map_size); +} + +static void set_mac_regions(void) +{ + tboot->num_mac_regions = 3; + /* S3 resume code */ + tboot->mac_regions[0].start = PFN_PHYS(PFN_DOWN(acpi_wakeup_address)); + tboot->mac_regions[0].size = PFN_UP(WAKEUP_SIZE) << PAGE_SHIFT; + /* AP trampoline code */ + tboot->mac_regions[1].start = + PFN_PHYS(PFN_DOWN(virt_to_phys(trampoline_base))); + tboot->mac_regions[1].size = PFN_UP(TRAMPOLINE_SIZE) << PAGE_SHIFT; + /* kernel code + data + bss */ + tboot->mac_regions[2].start = PFN_PHYS(PFN_DOWN(virt_to_phys(&_text))); + tboot->mac_regions[2].size = PFN_PHYS(PFN_UP(virt_to_phys(&_end))) - + PFN_PHYS(PFN_DOWN(virt_to_phys(&_text))); +} + +void tboot_shutdown(u32 shutdown_type) +{ + void (*shutdown)(void); + + if (!tboot_enabled()) + return; + + /* + * if we're being called before the 1:1 mapping is set up then just + * return and let the normal shutdown happen; this should only be + * due to very early panic() + */ + if (!tboot_pg_dir) + return; + + /* if this is S3 then set regions to MAC */ + if (shutdown_type == TB_SHUTDOWN_S3) + set_mac_regions(); + + tboot->shutdown_type = shutdown_type; + + switch_to_tboot_pt(); + + shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; + shutdown(); + + /* should not reach here */ + while (1) + halt(); +} + +static void tboot_copy_fadt(const struct acpi_table_fadt *fadt) +{ +#define TB_COPY_GAS(tbg, g) \ + tbg.space_id = g.space_id; \ + tbg.bit_width = g.bit_width; \ + tbg.bit_offset = g.bit_offset; \ + tbg.access_width = g.access_width; \ + tbg.address = g.address; + + TB_COPY_GAS(tboot->acpi_sinfo.pm1a_cnt_blk, fadt->xpm1a_control_block); + TB_COPY_GAS(tboot->acpi_sinfo.pm1b_cnt_blk, fadt->xpm1b_control_block); + TB_COPY_GAS(tboot->acpi_sinfo.pm1a_evt_blk, fadt->xpm1a_event_block); + TB_COPY_GAS(tboot->acpi_sinfo.pm1b_evt_blk, fadt->xpm1b_event_block); + + /* + * We need phys addr of waking vector, but can't use virt_to_phys() on + * &acpi_gbl_FACS because it is ioremap'ed, so calc from FACS phys + * addr. + */ + tboot->acpi_sinfo.wakeup_vector = fadt->facs + + offsetof(struct acpi_table_facs, firmware_waking_vector); +} + +void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) +{ + static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = { + /* S0,1,2: */ -1, -1, -1, + /* S3: */ TB_SHUTDOWN_S3, + /* S4: */ TB_SHUTDOWN_S4, + /* S5: */ TB_SHUTDOWN_S5 }; + + if (!tboot_enabled()) + return; + + tboot_copy_fadt(&acpi_gbl_FADT); + tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control; + tboot->acpi_sinfo.pm1b_cnt_val = pm1b_control; + /* we always use the 32b wakeup vector */ + tboot->acpi_sinfo.vector_width = 32; + tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address; + + if (sleep_state >= ACPI_S_STATE_COUNT || + acpi_shutdown_map[sleep_state] == -1) { + pr_warning("unsupported sleep state 0x%x\n", sleep_state); + return; + } + + tboot_shutdown(acpi_shutdown_map[sleep_state]); +} + +int tboot_wait_for_aps(int num_aps) +{ + unsigned long timeout; + + if (!tboot_enabled()) + return 0; + + timeout = jiffies + AP_WAIT_TIMEOUT*HZ; + while (atomic_read((atomic_t *)&tboot->num_in_wfs) != num_aps && + time_before(jiffies, timeout)) + cpu_relax(); + + return time_before(jiffies, timeout) ? 
0 : 1; +} + +/* + * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) + */ + +#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 +#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 + +/* # pages for each config regs space - used by fixmap */ +#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \ + TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) + +/* offsets from pub/priv config space */ +#define TXTCR_HEAP_BASE 0x0300 +#define TXTCR_HEAP_SIZE 0x0308 + +#define SHA1_SIZE 20 + +struct sha1_hash { + u8 hash[SHA1_SIZE]; +}; + +struct sinit_mle_data { + u32 version; /* currently 6 */ + struct sha1_hash bios_acm_id; + u32 edx_senter_flags; + u64 mseg_valid; + struct sha1_hash sinit_hash; + struct sha1_hash mle_hash; + struct sha1_hash stm_hash; + struct sha1_hash lcp_policy_hash; + u32 lcp_policy_control; + u32 rlp_wakeup_addr; + u32 reserved; + u32 num_mdrs; + u32 mdrs_off; + u32 num_vtd_dmars; + u32 vtd_dmars_off; +} __packed; + +struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tbl) +{ + void *heap_base, *heap_ptr, *config; + + if (!tboot_enabled()) + return dmar_tbl; + + /* + * ACPI tables may not be DMA protected by tboot, so use DMAR copy + * SINIT saved in SinitMleData in TXT heap (which is DMA protected) + */ + + /* map config space in order to get heap addr */ + config = ioremap(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * + PAGE_SIZE); + if (!config) + return NULL; + + /* now map TXT heap */ + heap_base = ioremap(*(u64 *)(config + TXTCR_HEAP_BASE), + *(u64 *)(config + TXTCR_HEAP_SIZE)); + iounmap(config); + if (!heap_base) + return NULL; + + /* walk heap to SinitMleData */ + /* skip BiosData */ + heap_ptr = heap_base + *(u64 *)heap_base; + /* skip OsMleData */ + heap_ptr += *(u64 *)heap_ptr; + /* skip OsSinitData */ + heap_ptr += *(u64 *)heap_ptr; + /* now points to SinitMleDataSize; set to SinitMleData */ + heap_ptr += sizeof(u64); + /* get addr of DMAR table */ + dmar_tbl = (struct acpi_table_header *)(heap_ptr + + ((struct sinit_mle_data *)heap_ptr)->vtd_dmars_off - + sizeof(u64)); + + /* don't unmap heap because dmar.c needs access to this */ + + return dmar_tbl; +} + +int tboot_force_iommu(void) +{ + if (!tboot_enabled()) + return 0; + + if (no_iommu || swiotlb || dmar_disabled) + pr_warning("Forcing Intel-IOMMU to enabled\n"); + + dmar_disabled = 0; +#ifdef CONFIG_SWIOTLB + swiotlb = 0; +#endif + no_iommu = 0; + + return 1; +} -- cgit v1.1 From 840c2baf2d4cdf35ecc3b7fcbba7740f97de30a4 Mon Sep 17 00:00:00 2001 From: Joseph Cihula Date: Tue, 30 Jun 2009 19:31:02 -0700 Subject: x86, intel_txt: Intel TXT reboot/halt shutdown support Support for graceful handling of kernel reboots after an Intel(R) TXT launch. Without this patch, attempting to reboot or halt the system will cause the TXT hardware to lock memory upon system restart because the secrets-in-memory flag that was set on launch was never cleared. This will in turn cause BIOS to execute a TXT Authenticated Code Module (ACM) that will scrub all of memory and then unlock it. Depending on the amount of memory in the system and its type, this may take some time. This patch creates a 1:1 address mapping to the tboot module and then calls back into tboot so that it may properly and securely clean up system state and clear the secrets-in-memory flag. When it has completed these steps, the tboot module will reboot or halt the system. 
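Two pieces have to line up for this to work: the 1:1 mapping must have been built once during boot via tboot_create_trampoline() (the init/main.c change in the diffstat, not shown here since the listing is limited to arch/, is presumably that call site), and tboot_shutdown() from the previous patch already bails out if tboot_pg_dir is still NULL, so a very early panic simply falls back to a normal reset. Roughly, as a sketch of the resulting flow rather than an actual hunk from this patch:

	/* once, during boot: identity-map the tboot range */
	tboot_create_trampoline();

	/* later, on reboot or halt: hand control back to tboot so it can
	 * clear the secrets-in-memory flag before the platform resets */
	tboot_shutdown(TB_SHUTDOWN_REBOOT);	/* or TB_SHUTDOWN_HALT */
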
arch/x86/kernel/reboot.c | 8 ++++++++ init/main.c | 3 +++ 2 files changed, 11 insertions(+) Signed-off-by: Joseph Cihula Signed-off-by: Shane Wang Signed-off-by: H. Peter Anvin --- arch/x86/kernel/reboot.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index d2d1ce8..9de01c5 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -24,6 +24,8 @@ # include #endif +#include + /* * Power off function, if any */ @@ -460,6 +462,8 @@ static void native_machine_emergency_restart(void) if (reboot_emergency) emergency_vmx_disable_all(); + tboot_shutdown(TB_SHUTDOWN_REBOOT); + /* Tell the BIOS if we want cold or warm reboot */ *((unsigned short *)__va(0x472)) = reboot_mode; @@ -586,6 +590,8 @@ static void native_machine_halt(void) /* stop other cpus and apics */ machine_shutdown(); + tboot_shutdown(TB_SHUTDOWN_HALT); + /* stop this cpu */ stop_this_cpu(NULL); } @@ -597,6 +603,8 @@ static void native_machine_power_off(void) machine_shutdown(); pm_power_off(); } + /* a fallback in case there is no PM info available */ + tboot_shutdown(TB_SHUTDOWN_HALT); } struct machine_ops machine_ops = { -- cgit v1.1 From 86886e55b273f565935491816c7c96b82469d4f8 Mon Sep 17 00:00:00 2001 From: Joseph Cihula Date: Tue, 30 Jun 2009 19:31:07 -0700 Subject: x86, intel_txt: Intel TXT Sx shutdown support Support for graceful handling of sleep states (S3/S4/S5) after an Intel(R) TXT launch. Without this patch, attempting to place the system in one of the ACPI sleep states (S3/S4/S5) will cause the TXT hardware to treat this as an attack and will cause a system reset, with memory locked. Not only may the subsequent memory scrub take some time, but the platform will be unable to enter the requested power state. This patch calls back into the tboot so that it may properly and securely clean up system state and clear the secrets-in-memory flag, after which it will place the system into the requested sleep state using ACPI information passed by the kernel. arch/x86/kernel/smpboot.c | 2 ++ drivers/acpi/acpica/hwsleep.c | 3 +++ kernel/cpu.c | 7 ++++++- 3 files changed, 11 insertions(+), 1 deletion(-) Signed-off-by: Joseph Cihula Signed-off-by: Shane Wang Signed-off-by: H. Peter Anvin --- arch/x86/kernel/smpboot.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2fecda6..61cc408 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -1317,6 +1318,7 @@ void play_dead_common(void) void native_play_dead(void) { play_dead_common(); + tboot_shutdown(TB_SHUTDOWN_WFS); wbinvd_halt(); } -- cgit v1.1 From c0b96cf639aa1bfa8983f734d4225091aa813e00 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 22 Jul 2009 16:50:57 +0900 Subject: sh: Provide _PAGE_SPECIAL for 32-bit. Allocate one of the unused PTE bits for _PAGE_SPECIAL directly. This is prep work for fast gup and the zero page revival. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable_32.h | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index 72ea209..0db19db 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -20,7 +20,7 @@ * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE. 
* * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages. - * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused. + * Bit 10 is used for _PAGE_ACCESSED, and bit 11 is used for _PAGE_SPECIAL. * * - On 29 bit platforms, bits 31 to 29 are used for the space attributes * and timing control which (together with bit 0) are moved into the @@ -52,6 +52,7 @@ #define _PAGE_PROTNONE 0x200 /* software: if not present */ #define _PAGE_ACCESSED 0x400 /* software: page referenced */ #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ +#define _PAGE_SPECIAL 0x800 /* software: special page */ #define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1) #define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER) @@ -148,8 +149,12 @@ # define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) #endif +/* + * Mask of bits that are to be preserved accross pgprot changes. + */ #define _PAGE_CHG_MASK \ - (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) + (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_SPECIAL) #ifndef __ASSEMBLY__ @@ -328,7 +333,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) #define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) #define pte_file(pte) ((pte).pte_low & _PAGE_FILE) -#define pte_special(pte) (0) +#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) #ifdef CONFIG_X2TLB #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) @@ -358,8 +363,9 @@ PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); +PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL); -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } +#define __HAVE_ARCH_PTE_SPECIAL /* * Macro and implementation to make a page protection as uncachable. -- cgit v1.1 From 2277ab4a1df50e05bc732fe9488d4e902bb8399a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 22 Jul 2009 19:20:49 +0900 Subject: sh: Migrate from PG_mapped to PG_dcache_dirty. This inverts the delayed dcache flush a bit to be more in line with other platforms. At the same time this also gives us the ability to do some more optimizations and cleanup. Now that the update_mmu_cache() callsite only tests for the bit, the implementation can gradually be split out and made generic, rather than relying on special implementations for each of the peculiar CPU types. SH7705 in 32kB mode and SH-4 still need slightly different handling, but this is something that can remain isolated in the varying page copy/clear routines. On top of that, SH-X3 is dcache coherent, so there is no need to bother with any of these tests in the PTEAEX version of update_mmu_cache(), so we kill that off too. 
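At its core this is the same lazy-flush handshake other architectures use: flush_dcache_page() no longer writes the page back immediately when it has no user mappings, it just marks the page, and the flush is performed later when a user PTE is actually installed and the addresses can alias. In outline, condensed from the sh7705/sh4 hunks below rather than being an extra hunk itself:

	/* flush_dcache_page(page): defer while nobody maps the page */
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);	/* flush later */
	else
		__flush_dcache_page(PHYSADDR(page_address(page)));

	/* update_mmu_cache(vma, address, pte): pay the debt once a mapping
	 * appears, and only if kernel and user addresses really alias */
	unsigned long addr = (unsigned long)page_address(page);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) &&
	    pages_do_alias(addr, address & PAGE_MASK))
		__flush_wback_region((void *)addr, PAGE_SIZE);
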
Signed-off-by: Paul Mundt --- arch/sh/include/asm/page.h | 6 +++ arch/sh/include/asm/pgtable.h | 7 --- arch/sh/include/cpu-sh3/cpu/cacheflush.h | 5 +-- arch/sh/include/cpu-sh4/cpu/cacheflush.h | 2 +- arch/sh/mm/cache-sh4.c | 10 ++++- arch/sh/mm/cache-sh7705.c | 7 ++- arch/sh/mm/pg-sh4.c | 74 +++++++++++++------------------- arch/sh/mm/pg-sh7705.c | 52 ++++------------------ arch/sh/mm/tlb-pteaex.c | 17 -------- arch/sh/mm/tlb-sh3.c | 20 ++++----- arch/sh/mm/tlb-sh4.c | 23 +++++----- 11 files changed, 81 insertions(+), 142 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 49592c7..a31ab40 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -50,6 +50,12 @@ extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; extern unsigned long memory_start, memory_end; +static inline unsigned long +pages_do_alias(unsigned long addr1, unsigned long addr2) +{ + return (addr1 ^ addr2) & shm_align_mask; +} + extern void clear_page(void *to); extern void copy_page(void *to, void *from); diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 2a011b1..d9f68f9 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -133,13 +133,6 @@ typedef pte_t *pte_addr_t; */ #define pgtable_cache_init() do { } while (0) -#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ - defined(CONFIG_SH7705_CACHE_32KB)) -struct mm_struct; -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -#endif - struct vm_area_struct; extern void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte); diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h index 1ac27aa..6485ad5 100644 --- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h +++ b/arch/sh/include/cpu-sh3/cpu/cacheflush.h @@ -15,10 +15,7 @@ * SH4. 
Unlike the SH4 this is a unified cache so we need to do some work * in mmap when 'exec'ing a new binary */ - /* 32KB cache, 4kb PAGE sizes need to check bit 12 */ -#define CACHE_ALIAS 0x00001000 - -#define PG_mapped PG_arch_1 +#define PG_dcache_dirty PG_arch_1 void flush_cache_all(void); void flush_cache_mm(struct mm_struct *mm); diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h index 065306d..3564f17 100644 --- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h +++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h @@ -38,6 +38,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, /* Initialization of P3 area for copy_user_page */ void p3_cache_init(void); -#define PG_mapped PG_arch_1 +#define PG_dcache_dirty PG_arch_1 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 5cfe08d..c3a09b2 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -246,7 +247,14 @@ static inline void flush_cache_4096(unsigned long start, */ void flush_dcache_page(struct page *page) { - if (test_bit(PG_mapped, &page->flags)) { + struct address_space *mapping = page_mapping(page); + +#ifndef CONFIG_SMP + if (mapping && !mapping_mapped(mapping)) + set_bit(PG_dcache_dirty, &page->flags); + else +#endif + { unsigned long phys = PHYSADDR(page_address(page)); unsigned long addr = CACHE_OC_ADDRESS_ARRAY; int i, n; diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 22dacc7..fa37bff 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -128,7 +129,11 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) */ void flush_dcache_page(struct page *page) { - if (test_bit(PG_mapped, &page->flags)) + struct address_space *mapping = page_mapping(page); + + if (mapping && !mapping_mapped(mapping)) + set_bit(PG_dcache_dirty, &page->flags); + else __flush_dcache_page(PHYSADDR(page_address(page))); } diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index 2fe14da..f3c4b2a 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -15,8 +15,6 @@ #include #include -#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) - #define kmap_get_fixmap_pte(vaddr) \ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) @@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page) */ void clear_user_page(void *to, unsigned long address, struct page *page) { - __set_bit(PG_mapped, &page->flags); - clear_page(to); - if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS)) + + if (pages_do_alias((unsigned long)to, address & PAGE_MASK)) __flush_wback_region(to, PAGE_SIZE); } @@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { - void *vto; - - __set_bit(PG_mapped, &page->flags); - - vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); - memcpy(vto, src, len); - kunmap_coherent(vto); + if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { + void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); + memcpy(vto, src, len); + kunmap_coherent(vto); + } else { + memcpy(dst, src, len); + set_bit(PG_dcache_dirty, &page->flags); + } if (vma->vm_flags & VM_EXEC) flush_cache_page(vma, vaddr, page_to_pfn(page)); @@ -95,13 +93,14 
@@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { - void *vfrom; - - __set_bit(PG_mapped, &page->flags); - - vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); - memcpy(dst, vfrom, len); - kunmap_coherent(vfrom); + if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { + void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); + memcpy(dst, vfrom, len); + kunmap_coherent(vfrom); + } else { + memcpy(dst, src, len); + set_bit(PG_dcache_dirty, &page->flags); + } } void copy_user_highpage(struct page *to, struct page *from, @@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from, { void *vfrom, *vto; - __set_bit(PG_mapped, &to->flags); - vto = kmap_atomic(to, KM_USER1); - vfrom = kmap_coherent(from, vaddr); - copy_page(vto, vfrom); - kunmap_coherent(vfrom); - if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS)) + if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) { + vfrom = kmap_coherent(from, vaddr); + copy_page(vto, vfrom); + kunmap_coherent(vfrom); + } else { + vfrom = kmap_atomic(from, KM_USER0); + copy_page(vto, vfrom); + kunmap_atomic(vfrom, KM_USER0); + } + + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) __flush_wback_region(vto, PAGE_SIZE); kunmap_atomic(vto, KM_USER1); @@ -124,23 +128,3 @@ void copy_user_highpage(struct page *to, struct page *from, smp_wmb(); } EXPORT_SYMBOL(copy_user_highpage); - -/* - * For SH-4, we have our own implementation for ptep_get_and_clear - */ -pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t pte = *ptep; - - pte_clear(mm, addr, ptep); - if (!pte_not_present(pte)) { - unsigned long pfn = pte_pfn(pte); - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - struct address_space *mapping = page_mapping(page); - if (!mapping || !mapping_writably_mapped(mapping)) - __clear_bit(PG_mapped, &page->flags); - } - } - return pte; -} diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c index eaf2514..684891b 100644 --- a/arch/sh/mm/pg-sh7705.c +++ b/arch/sh/mm/pg-sh7705.c @@ -26,7 +26,7 @@ #include #include -static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) +static void __flush_purge_virtual_region(void *p1, void *virt, int size) { unsigned long v; unsigned long begin, end; @@ -75,19 +75,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) */ void clear_user_page(void *to, unsigned long address, struct page *pg) { - struct page *page = virt_to_page(to); - - __set_bit(PG_mapped, &page->flags); - if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { - clear_page(to); - __flush_wback_region(to, PAGE_SIZE); - } else { + if (pages_do_alias(address, (unsigned long)to)) __flush_purge_virtual_region(to, (void *)(address & 0xfffff000), PAGE_SIZE); - clear_page(to); - __flush_wback_region(to, PAGE_SIZE); - } + + clear_page(to); + __flush_wback_region(to, PAGE_SIZE); } /* @@ -98,41 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg) */ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) { - struct page *page = virt_to_page(to); - - - __set_bit(PG_mapped, &page->flags); - if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { - copy_page(to, from); - __flush_wback_region(to, PAGE_SIZE); - } else { + if (pages_do_alias(address, (unsigned long)to)) __flush_purge_virtual_region(to, (void *)(address & 0xfffff000), 
PAGE_SIZE); - copy_page(to, from); - __flush_wback_region(to, PAGE_SIZE); - } -} -/* - * For SH7705, we have our own implementation for ptep_get_and_clear - * Copied from pg-sh4.c - */ -pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t pte = *ptep; - - pte_clear(mm, addr, ptep); - if (!pte_not_present(pte)) { - unsigned long pfn = pte_pfn(pte); - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - struct address_space *mapping = page_mapping(page); - if (!mapping || !mapping_writably_mapped(mapping)) - __clear_bit(PG_mapped, &page->flags); - } - } - - return pte; + copy_page(to, from); + __flush_wback_region(to, PAGE_SIZE); } - diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 2aab3ea..c39b773 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c @@ -27,23 +27,6 @@ void update_mmu_cache(struct vm_area_struct * vma, if (vma && current->active_mm != vma->vm_mm) return; -#ifndef CONFIG_CACHE_OFF - { - unsigned long pfn = pte_pfn(pte); - - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - - if (!test_bit(PG_mapped, &page->flags)) { - unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; - __flush_wback_region((void *)P1SEGADDR(phys), - PAGE_SIZE); - __set_bit(PG_mapped, &page->flags); - } - } - } -#endif - local_irq_save(flags); /* Set PTEH register */ diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 17cb7c3..9b8459c 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c @@ -33,25 +33,25 @@ void update_mmu_cache(struct vm_area_struct * vma, unsigned long flags; unsigned long pteval; unsigned long vpn; + unsigned long pfn = pte_pfn(pte); + struct page *page; /* Ptrace may call this routine. */ if (vma && current->active_mm != vma->vm_mm) return; + page = pfn_to_page(pfn); + if (pfn_valid(pfn) && page_mapping(page)) { #if defined(CONFIG_SH7705_CACHE_32KB) - { - struct page *page = pte_page(pte); - unsigned long pfn = pte_pfn(pte); + int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); + if (dirty) { + unsigned long addr = (unsigned long)page_address(page); - if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) { - unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; - - __flush_wback_region((void *)P1SEGADDR(phys), - PAGE_SIZE); - __set_bit(PG_mapped, &page->flags); + if (pages_do_alias(addr, address & PAGE_MASK)) + __flush_wback_region((void *)addr, PAGE_SIZE); } - } #endif + } local_irq_save(flags); diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index f0c7b73..cf50082 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c @@ -21,27 +21,26 @@ void update_mmu_cache(struct vm_area_struct * vma, unsigned long flags; unsigned long pteval; unsigned long vpn; + unsigned long pfn = pte_pfn(pte); + struct page *page; /* Ptrace may call this routine. 
*/ if (vma && current->active_mm != vma->vm_mm) return; -#ifndef CONFIG_CACHE_OFF - { - unsigned long pfn = pte_pfn(pte); + page = pfn_to_page(pfn); + if (pfn_valid(pfn) && page_mapping(page)) { +#ifndef CONFIG_SMP + int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); + if (dirty) { - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); + unsigned long addr = (unsigned long)page_address(page); - if (!test_bit(PG_mapped, &page->flags)) { - unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; - __flush_wback_region((void *)P1SEGADDR(phys), - PAGE_SIZE); - __set_bit(PG_mapped, &page->flags); - } + if (pages_do_alias(addr, address & PAGE_MASK)) + __flush_wback_region((void *)addr, PAGE_SIZE); } - } #endif + } local_irq_save(flags); -- cgit v1.1 From 2c59b0b70b9d5d61c726f179724660c4c2423f31 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 14:41:35 +0000 Subject: usb: m66592-udc platform data on_chip support Convert the m66592-udc driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_BUILT_IN_M66592 ugliness. This makes the code cleaner and also allows us to support both external and internal m66592 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip m66592 without modifying the Kconfig. The patch adds a m66592 header file for platform data and ties in platform data to the existing m66592 devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-highlander/setup.c | 7 +++++++ arch/sh/boards/mach-x3proto/setup.c | 7 +++++++ arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 8 +++++++- 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c index 1639f89..566e69d 100644 --- a/arch/sh/boards/mach-highlander/setup.c +++ b/arch/sh/boards/mach-highlander/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,11 @@ static struct platform_device r8a66597_usb_host_device = { .resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -81,6 +87,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c index 8913ae3..efe4cb9 100644 --- a/arch/sh/boards/mach-x3proto/setup.c +++ b/arch/sh/boards/mach-x3proto/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include static struct resource heartbeat_resources[] = { @@ -89,6 +90,11 @@ static struct platform_device r8a66597_usb_host_device = { .resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -109,6 +115,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = 
ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ea524a2..0bad14a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -47,9 +48,13 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; +static struct m66592_platdata usbf_platdata = { + .on_chip = 1, +}; + static struct resource usbf_resources[] = { [0] = { - .name = "m66592_udc", + .name = "USBF", .start = 0x04480000, .end = 0x044800FF, .flags = IORESOURCE_MEM, @@ -67,6 +72,7 @@ static struct platform_device usbf_device = { .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, -- cgit v1.1 From 955c9863bb5855a994751843e7066017edc00410 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 15:14:29 +0000 Subject: sh: convert processor device setup functions to arch_initcall() Convert the processor platform device setup functions from __initcall() and sometimes device_initcall() to arch_initcall(). This makes sure that the platform devices are registered a bit earlier so the devices are available when drivers register using initcall levels earlier than device_initcall(). A good example is platform devices needed by i2c-sh_mobile.c which registers a bit earlier using subsys_initcall(). Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-mxg.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7705.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7710.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7720.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7760.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-shx3.c | 2 +- arch/sh/kernel/cpu/sh5/setup-sh5.c | 2 +- 24 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 1379873..8555c05 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -187,7 +187,7 @@ static int __init sh7619_devices_setup(void) return platform_add_devices(sh7619_devices, ARRAY_SIZE(sh7619_devices)); } -__initcall(sh7619_devices_setup); +arch_initcall(sh7619_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 869c2da..b673764 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -238,7 +238,7 @@ static int __init mxg_devices_setup(void) return platform_add_devices(mxg_devices, 
ARRAY_SIZE(mxg_devices)); } -__initcall(mxg_devices_setup); +arch_initcall(mxg_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index d8febe1..fbde5b7 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -357,7 +357,7 @@ static int __init sh7201_devices_setup(void) return platform_add_devices(sh7201_devices, ARRAY_SIZE(sh7201_devices)); } -__initcall(sh7201_devices_setup); +arch_initcall(sh7201_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 62e3039..d3fd536 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -367,7 +367,7 @@ static int __init sh7203_devices_setup(void) return platform_add_devices(sh7203_devices, ARRAY_SIZE(sh7203_devices)); } -__initcall(sh7203_devices_setup); +arch_initcall(sh7203_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index 3e6f3d7..a9ccc5e 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -338,7 +338,7 @@ static int __init sh7206_devices_setup(void) return platform_add_devices(sh7206_devices, ARRAY_SIZE(sh7206_devices)); } -__initcall(sh7206_devices_setup); +arch_initcall(sh7206_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 88f742f..c231059 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -222,7 +222,7 @@ static int __init sh7705_devices_setup(void) return platform_add_devices(sh7705_devices, ARRAY_SIZE(sh7705_devices)); } -__initcall(sh7705_devices_setup); +arch_initcall(sh7705_devices_setup); static struct platform_device *sh7705_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index c563067..347ab35 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -250,7 +250,7 @@ static int __init sh770x_devices_setup(void) return platform_add_devices(sh770x_devices, ARRAY_SIZE(sh770x_devices)); } -__initcall(sh770x_devices_setup); +arch_initcall(sh770x_devices_setup); static struct platform_device *sh770x_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index efa76c8..717e90a 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -226,7 +226,7 @@ static int __init sh7710_devices_setup(void) return platform_add_devices(sh7710_devices, ARRAY_SIZE(sh7710_devices)); } -__initcall(sh7710_devices_setup); +arch_initcall(sh7710_devices_setup); static struct platform_device *sh7710_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 5b21077..74d8baa 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -388,7 +388,7 @@ static int __init sh7720_devices_setup(void) return platform_add_devices(sh7720_devices, ARRAY_SIZE(sh7720_devices)); } -__initcall(sh7720_devices_setup); +arch_initcall(sh7720_devices_setup); static struct platform_device *sh7720_early_devices[] __initdata = { &cmt0_device, diff --git 
a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 6d088d1..de4827d 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -138,7 +138,7 @@ static int __init sh4202_devices_setup(void) return platform_add_devices(sh4202_devices, ARRAY_SIZE(sh4202_devices)); } -__initcall(sh4202_devices_setup); +arch_initcall(sh4202_devices_setup); static struct platform_device *sh4202_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 851672d..1b8b122 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -239,7 +239,7 @@ static int __init sh7750_devices_setup(void) return platform_add_devices(sh7750_devices, ARRAY_SIZE(sh7750_devices)); } -__initcall(sh7750_devices_setup); +arch_initcall(sh7750_devices_setup); static struct platform_device *sh7750_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 5b82251..7fbb7be 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -265,7 +265,7 @@ static int __init sh7760_devices_setup(void) return platform_add_devices(sh7760_devices, ARRAY_SIZE(sh7760_devices)); } -__initcall(sh7760_devices_setup); +arch_initcall(sh7760_devices_setup); static struct platform_device *sh7760_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 6307e08..ac4d567 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -325,7 +325,7 @@ static int __init sh7343_devices_setup(void) return platform_add_devices(sh7343_devices, ARRAY_SIZE(sh7343_devices)); } -__initcall(sh7343_devices_setup); +arch_initcall(sh7343_devices_setup); static struct platform_device *sh7343_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index f6d2088..4a9010b 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -318,7 +318,7 @@ static int __init sh7366_devices_setup(void) return platform_add_devices(sh7366_devices, ARRAY_SIZE(sh7366_devices)); } -__initcall(sh7366_devices_setup); +arch_initcall(sh7366_devices_setup); static struct platform_device *sh7366_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 0bad14a..67b0d87 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -365,7 +365,7 @@ static int __init sh7722_devices_setup(void) return platform_add_devices(sh7722_devices, ARRAY_SIZE(sh7722_devices)); } -__initcall(sh7722_devices_setup); +arch_initcall(sh7722_devices_setup); static struct platform_device *sh7722_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 2851649..26dc4d3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -473,7 +473,7 @@ static int __init sh7723_devices_setup(void) return platform_add_devices(sh7723_devices, ARRAY_SIZE(sh7723_devices)); } -__initcall(sh7723_devices_setup); +arch_initcall(sh7723_devices_setup); static struct platform_device *sh7723_early_devices[] __initdata = { &cmt_device, diff --git 
a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index e5ac9eb..a04edaa 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -508,7 +508,7 @@ static int __init sh7724_devices_setup(void) return platform_add_devices(sh7724_devices, ARRAY_SIZE(sh7724_devices)); } -device_initcall(sh7724_devices_setup); +arch_initcall(sh7724_devices_setup); static struct platform_device *sh7724_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index f1e0c0d..4659fff 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -314,7 +314,7 @@ static int __init sh7763_devices_setup(void) return platform_add_devices(sh7763_devices, ARRAY_SIZE(sh7763_devices)); } -__initcall(sh7763_devices_setup); +arch_initcall(sh7763_devices_setup); static struct platform_device *sh7763_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 1e86209..eead08d 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c @@ -368,7 +368,7 @@ static int __init sh7770_devices_setup(void) return platform_add_devices(sh7770_devices, ARRAY_SIZE(sh7770_devices)); } -__initcall(sh7770_devices_setup); +arch_initcall(sh7770_devices_setup); static struct platform_device *sh7770_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 715e05b..2c901f4 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -256,7 +256,7 @@ static int __init sh7780_devices_setup(void) return platform_add_devices(sh7780_devices, ARRAY_SIZE(sh7780_devices)); } -__initcall(sh7780_devices_setup); +arch_initcall(sh7780_devices_setup); static struct platform_device *sh7780_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index af56140..7f6c718 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -263,7 +263,7 @@ static int __init sh7785_devices_setup(void) return platform_add_devices(sh7785_devices, ARRAY_SIZE(sh7785_devices)); } -__initcall(sh7785_devices_setup); +arch_initcall(sh7785_devices_setup); static struct platform_device *sh7785_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index b700494..0104a8e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -547,7 +547,7 @@ static int __init sh7786_devices_setup(void) return platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices)); } -device_initcall(sh7786_devices_setup); +arch_initcall(sh7786_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 53c65fd..07f0789 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -256,7 +256,7 @@ static int __init shx3_devices_setup(void) return platform_add_devices(shx3_devices, ARRAY_SIZE(shx3_devices)); } -__initcall(shx3_devices_setup); +arch_initcall(shx3_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c 
index f5ff1ac..6a0f82f 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c @@ -186,7 +186,7 @@ static int __init sh5_devices_setup(void) return platform_add_devices(sh5_devices, ARRAY_SIZE(sh5_devices)); } -__initcall(sh5_devices_setup); +arch_initcall(sh5_devices_setup); void __init plat_early_device_setup(void) { -- cgit v1.1 From e7d165146a7de5ceb4f68e188b2679f003744f54 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 16:20:54 +0000 Subject: sh: kfr2r09 board support - SCIF console This patch adds basic kfr2r09 board support. Only the SCIF1 console is supported with this patch, but this patch and a proper sh7724 configuration is all that is needed. Combine with an initramfs to have a small RAM based kernel and distribution booted as zImage from RAM via JTAG. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Makefile | 1 + arch/sh/boards/Kconfig | 7 ++++++ arch/sh/boards/mach-kfr2r09/Makefile | 1 + arch/sh/boards/mach-kfr2r09/setup.c | 48 ++++++++++++++++++++++++++++++++++++ 4 files changed, 57 insertions(+) create mode 100644 arch/sh/boards/mach-kfr2r09/Makefile create mode 100644 arch/sh/boards/mach-kfr2r09/setup.c (limited to 'arch') diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 75d049b0..d7358d7 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -136,6 +136,7 @@ machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705 machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander machdir-$(CONFIG_SH_MIGOR) += mach-migor +machdir-$(CONFIG_SH_KFR2R09) += mach-kfr2r09 machdir-$(CONFIG_SH_SDK7780) += mach-sdk7780 machdir-$(CONFIG_SH_X3PROTO) += mach-x3proto machdir-$(CONFIG_SH_SH7763RDP) += mach-sh7763rdp diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 2b1af0e..db04c85 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -193,6 +193,13 @@ config SH_AP325RXA Renesas "AP-325RXA" support. Compatible with ALGO SYSTEM CO.,LTD. "AP-320A" +config SH_KFR2R09 + bool "KFR2R09" + depends on CPU_SUBTYPE_SH7724 + select ARCH_REQUIRE_GPIOLIB + help + "Kit For R2R for 2009" support. + config SH_SH7763RDP bool "SH7763RDP" depends on CPU_SUBTYPE_SH7763 diff --git a/arch/sh/boards/mach-kfr2r09/Makefile b/arch/sh/boards/mach-kfr2r09/Makefile new file mode 100644 index 0000000..7703756 --- /dev/null +++ b/arch/sh/boards/mach-kfr2r09/Makefile @@ -0,0 +1 @@ +obj-y := setup.o diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c new file mode 100644 index 0000000..224318a --- /dev/null +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -0,0 +1,48 @@ +/* + * KFR2R09 board support code + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int __init kfr2r09_devices_setup(void) +{ + /* enable SCIF1 serial port for YC401 console support */ + gpio_request(GPIO_FN_SCIF1_RXD, NULL); + gpio_request(GPIO_FN_SCIF1_TXD, NULL); + + return 0; +} +device_initcall(kfr2r09_devices_setup); + +/* Return the board specific boot mode pin configuration */ +static int kfr2r09_mode_pins(void) +{ + /* MD0=1, MD1=1, MD2=0: Clock Mode 3 + * MD3=0: 16-bit Area0 Bus Width + * MD5=1: Little Endian + * MD8=1: Test Mode Disabled + */ + return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8; +} + +/* + * The Machine Vector + */ +static struct sh_machine_vector mv_kfr2r09 __initmv = { + .mv_name = "kfr2r09", + .mv_mode_pins = kfr2r09_mode_pins, +}; -- cgit v1.1 From a366aa64f3a51ca3deebe74447f929a5614d9b90 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 16:22:28 +0000 Subject: sh: kfr2r09 board support - NOR flash This patch adds NOR flash support to the kfr2r09 board. NOR flash support is added by describing the NOR flash chip hooked up to CS0 as platform device data for the physmap-flash MTD driver. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-kfr2r09/setup.c | 54 ++++++++++++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 224318a..382bf18 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -18,13 +19,64 @@ #include #include +static struct mtd_partition kfr2r09_nor_flash_partitions[] = +{ + { + .name = "boot", + .offset = 0, + .size = (4 * 1024 * 1024), + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, + { + .name = "other", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct physmap_flash_data kfr2r09_nor_flash_data = { + .width = 2, + .parts = kfr2r09_nor_flash_partitions, + .nr_parts = ARRAY_SIZE(kfr2r09_nor_flash_partitions), +}; + +static struct resource kfr2r09_nor_flash_resources[] = { + [0] = { + .name = "NOR Flash", + .start = 0x00000000, + .end = 0x03ffffff, + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device kfr2r09_nor_flash_device = { + .name = "physmap-flash", + .resource = kfr2r09_nor_flash_resources, + .num_resources = ARRAY_SIZE(kfr2r09_nor_flash_resources), + .dev = { + .platform_data = &kfr2r09_nor_flash_data, + }, +}; + +static struct platform_device *kfr2r09_devices[] __initdata = { + &kfr2r09_nor_flash_device, +}; + +#define BSC_CS0BCR 0xfec10004 +#define BSC_CS0WCR 0xfec10024 + static int __init kfr2r09_devices_setup(void) { /* enable SCIF1 serial port for YC401 console support */ gpio_request(GPIO_FN_SCIF1_RXD, NULL); gpio_request(GPIO_FN_SCIF1_TXD, NULL); - return 0; + /* setup NOR flash at CS0 */ + ctrl_outl(0x36db0400, BSC_CS0BCR); + ctrl_outl(0x00000500, BSC_CS0WCR); + + return platform_add_devices(kfr2r09_devices, + ARRAY_SIZE(kfr2r09_devices)); } device_initcall(kfr2r09_devices_setup); -- cgit v1.1 From 39a6bf1426af4aed2348bb533481027862346a37 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 16:23:45 +0000 Subject: sh: kfr2r09 board support - KEYSC keypad This patch adds KEYSC keypad support to the kfr2r09 board. 
The keys driven by the sh7724 on-chip KEYSC block are described as a platform device and platform data for the sh_keysc driver. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-kfr2r09/setup.c | 53 +++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 382bf18..bf5f8f8 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -14,9 +14,11 @@ #include #include #include +#include #include #include #include +#include #include static struct mtd_partition kfr2r09_nor_flash_partitions[] = @@ -58,8 +60,46 @@ static struct platform_device kfr2r09_nor_flash_device = { }, }; +static struct sh_keysc_info kfr2r09_sh_keysc_info = { + .mode = SH_KEYSC_MODE_1, /* KEYOUT0->4, KEYIN0->4 */ + .scan_timing = 3, + .delay = 10, + .keycodes = { + KEY_PHONE, KEY_CLEAR, KEY_MAIL, KEY_WWW, KEY_ENTER, + KEY_1, KEY_2, KEY_3, 0, KEY_UP, + KEY_4, KEY_5, KEY_6, 0, KEY_LEFT, + KEY_7, KEY_8, KEY_9, KEY_PROG1, KEY_RIGHT, + KEY_S, KEY_0, KEY_P, KEY_PROG2, KEY_DOWN, + 0, 0, 0, 0, 0 + }, +}; + +static struct resource kfr2r09_sh_keysc_resources[] = { + [0] = { + .name = "KEYSC", + .start = 0x044b0000, + .end = 0x044b000f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 79, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device kfr2r09_sh_keysc_device = { + .name = "sh_keysc", + .id = 0, /* "keysc0" clock */ + .num_resources = ARRAY_SIZE(kfr2r09_sh_keysc_resources), + .resource = kfr2r09_sh_keysc_resources, + .dev = { + .platform_data = &kfr2r09_sh_keysc_info, + }, +}; + static struct platform_device *kfr2r09_devices[] __initdata = { &kfr2r09_nor_flash_device, + &kfr2r09_sh_keysc_device, }; #define BSC_CS0BCR 0xfec10004 @@ -75,6 +115,19 @@ static int __init kfr2r09_devices_setup(void) ctrl_outl(0x36db0400, BSC_CS0BCR); ctrl_outl(0x00000500, BSC_CS0WCR); + /* setup KEYSC pins */ + gpio_request(GPIO_FN_KEYOUT0, NULL); + gpio_request(GPIO_FN_KEYOUT1, NULL); + gpio_request(GPIO_FN_KEYOUT2, NULL); + gpio_request(GPIO_FN_KEYOUT3, NULL); + gpio_request(GPIO_FN_KEYOUT4_IN6, NULL); + gpio_request(GPIO_FN_KEYIN0, NULL); + gpio_request(GPIO_FN_KEYIN1, NULL); + gpio_request(GPIO_FN_KEYIN2, NULL); + gpio_request(GPIO_FN_KEYIN3, NULL); + gpio_request(GPIO_FN_KEYIN4, NULL); + gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); + return platform_add_devices(kfr2r09_devices, ARRAY_SIZE(kfr2r09_devices)); } -- cgit v1.1 From 5bdef865eb358b6f3760e25e591ae115e9eeddef Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 23 Jul 2009 08:59:48 +0000 Subject: sh: kfr2r09 board support - mach-type and defconfig This patch adds a defconfig and a mach-types entry for the kfr2r09 board. At this point only a few devices like SCIF, KEYSC and NOR Flash are supported together with sh7724 devices such as IIC0, IIC1 and the multimedia blocks exported via UIO. Kexec is supported, but booting from flash is not (yet). 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/configs/kfr2r09_defconfig | 877 ++++++++++++++++++++++++++++++++++++++ arch/sh/tools/mach-types | 1 + 2 files changed, 878 insertions(+) create mode 100644 arch/sh/configs/kfr2r09_defconfig (limited to 'arch') diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig new file mode 100644 index 0000000..90e575c --- /dev/null +++ b/arch/sh/configs/kfr2r09_defconfig @@ -0,0 +1,877 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.31-rc3 +# Thu Jul 23 17:45:09 2009 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +# CONFIG_SUPERH64 is not set +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_IRQ_PER_CPU=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_SYS_SUPPORTS_CMT=y +CONFIG_SYS_SUPPORTS_TMU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_ARCH_HAS_DEFAULT_IDLE=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_COUNTERS=y + +# +# Performance Counters +# +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_CLK=y 
+CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +# CONFIG_MODULES is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" +# CONFIG_FREEZER is not set + +# +# System type +# +CONFIG_CPU_SH4=y +CONFIG_CPU_SH4A=y +CONFIG_CPU_SHX2=y +CONFIG_ARCH_SHMOBILE=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7201 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +CONFIG_CPU_SUBTYPE_SH7724=y +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SH7786 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_MEMORY_START=0x08000000 +CONFIG_MEMORY_SIZE=0x08000000 +CONFIG_29BIT=y +# CONFIG_X2TLB is not set +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 + +# +# Cache configuration +# +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +# CONFIG_SH_STORE_QUEUES is not set +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +# 
CONFIG_SH_7724_SOLUTION_ENGINE is not set +CONFIG_SH_KFR2R09=y + +# +# Timer and clock configuration +# +# CONFIG_SH_TIMER_TMU is not set +CONFIG_SH_TIMER_CMT=y +CONFIG_SH_PCLK_FREQ=33333333 +CONFIG_SH_CLK_CPG=y +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +# CONFIG_HEARTBEAT is not set +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +# CONFIG_SCHED_HRTICK is not set +CONFIG_KEXEC=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_SECCOMP is not set +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y +# CONFIG_SPARSE_IRQ is not set + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC1,115200" + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options (EXPERIMENTAL) +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_SUSPEND is not set +# CONFIG_HIBERNATION is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# 
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +# CONFIG_NETDEVICES is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +CONFIG_KEYBOARD_SH_KEYSC=y +# CONFIG_KEYBOARD_XTKBD is not 
set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=6 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_CHARDEV is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_SH_MOBILE=y +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set 
+CONFIG_RTC_LIB=y +# CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=y +# CONFIG_UIO_PDRV is not set +CONFIG_UIO_PDRV_GENIRQ=y +# CONFIG_UIO_SMX is not set +# CONFIG_UIO_SERCOS3 is not set + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_FILE_LOCKING=y +# CONFIG_FSNOTIFY is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_NETWORK_FILESYSTEMS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_NLS is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_LATENCYTOP is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FTRACE_SYSCALLS=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_EARLY_SCIF_CONSOLE is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_CRYPTO is not set +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +CONFIG_CRC7=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_HAVE_LMB=y +CONFIG_NLATTR=y +CONFIG_GENERIC_ATOMIC64=y diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index fec3a53..09eef36 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -56,3 +56,4 @@ SH7785LCR SH_SH7785LCR URQUELL SH_URQUELL ESPT SH_ESPT POLARIS SH_POLARIS +KFR2R09 SH_KFR2R09 -- cgit v1.1 From dfff0fa65ab15db45acd64b3189787d37ab163cd Mon 
Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 27 Jul 2009 20:53:22 +0900 Subject: sh: wire up clear_user_highpage() for sh4, convert sh7705. This wires up clear_user_highpage() on SH-4 and subsequently converts the SH7705 32kB cache mode over to using it. Now that the SH-4 implementation handles all of the dcache purging directly in the aliasing case, there is no need to do this in the default clear_page() implementation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/cacheflush.h | 2 +- arch/sh/include/asm/page.h | 12 +++-- arch/sh/include/asm/pgtable.h | 3 +- arch/sh/kernel/sh_ksyms_32.c | 6 --- arch/sh/lib/clear_page.S | 46 ------------------ arch/sh/mm/Makefile_32 | 2 +- arch/sh/mm/pg-nommu.c | 7 +-- arch/sh/mm/pg-sh4.c | 29 ++++++----- arch/sh/mm/pg-sh7705.c | 102 --------------------------------------- 9 files changed, 27 insertions(+), 182 deletions(-) delete mode 100644 arch/sh/mm/pg-sh7705.c (limited to 'arch') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4c5462d..4e36011 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -49,7 +49,7 @@ static inline void flush_kernel_dcache_page(struct page *page) flush_dcache_page(page); } -#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF) +#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF) extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index a31ab40..5208b7b 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -56,21 +56,25 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) return (addr1 ^ addr2) & shm_align_mask; } -extern void clear_page(void *to); + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); +struct page; +struct vm_area_struct; + #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \ defined(CONFIG_SH7705_CACHE_32KB)) -struct page; -struct vm_area_struct; extern void clear_user_page(void *to, unsigned long address, struct page *page); extern void copy_user_page(void *to, void *from, unsigned long address, struct page *page); -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE +extern void clear_user_highpage(struct page *page, unsigned long vaddr); +#define clear_user_highpage clear_user_highpage #endif #else #define clear_user_page(page, vaddr, pg) clear_page(page) diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index d9f68f9..bef3ab7 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -141,7 +141,8 @@ extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU) +#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ + defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU) extern void kmap_coherent_init(void); #else #define kmap_coherent_init() do { } while (0) diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index cec6108..8dbe26b 100644 --- 
a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -101,11 +101,6 @@ EXPORT_SYMBOL(flush_cache_range); EXPORT_SYMBOL(flush_dcache_page); #endif -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ - (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) -EXPORT_SYMBOL(clear_user_page); -#endif - #ifdef CONFIG_MCOUNT DECLARE_EXPORT(mcount); #endif @@ -114,7 +109,6 @@ EXPORT_SYMBOL(csum_partial_copy_generic); #ifdef CONFIG_IPV6 EXPORT_SYMBOL(csum_ipv6_magic); #endif -EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(_ebss); diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S index 8342bfb..bee9817 100644 --- a/arch/sh/lib/clear_page.S +++ b/arch/sh/lib/clear_page.S @@ -8,52 +8,6 @@ #include #include -/* - * clear_page - * @to: P1 address - * - * void clear_page(void *to) - */ - -/* - * r0 --- scratch - * r4 --- to - * r5 --- to + PAGE_SIZE - */ -ENTRY(clear_page) - mov r4,r5 - mov.l .Llimit,r0 - add r0,r5 - mov #0,r0 - ! -1: -#if defined(CONFIG_CPU_SH4) - movca.l r0,@r4 - mov r4,r1 -#else - mov.l r0,@r4 -#endif - add #32,r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 -#if defined(CONFIG_CPU_SH4) - ocbwb @r1 -#endif - cmp/eq r5,r4 - bf/s 1b - add #28,r4 - ! - rts - nop - - .balign 4 -.Llimit: .long (PAGE_SIZE-28) - ENTRY(__clear_user) ! mov #0, r0 diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 986a1e0..5c04bbb 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -31,7 +31,7 @@ tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o obj-y += $(tlb-y) ifndef CONFIG_CACHE_OFF obj-$(CONFIG_CPU_SH4) += pg-sh4.o -obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o +obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o endif endif diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c index 91ed4e6..7e33b48 100644 --- a/arch/sh/mm/pg-nommu.c +++ b/arch/sh/mm/pg-nommu.c @@ -1,7 +1,7 @@ /* * arch/sh/mm/pg-nommu.c * - * clear_page()/copy_page() implementation for MMUless SH. + * copy_page()/__copy_user()/__clear_user() implementations for MMUless SH. * * Copyright (C) 2003 Paul Mundt * @@ -20,11 +20,6 @@ void copy_page(void *to, void *from) memcpy(to, from, PAGE_SIZE); } -void clear_page(void *to) -{ - memset(to, 0, PAGE_SIZE); -} - __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { memcpy(to, from, n); diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index f3c4b2a..4d93070 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -2,7 +2,7 @@ * arch/sh/mm/pg-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2009 Paul Mundt * * Released under the terms of the GNU GPL v2.0. 
*/ @@ -58,20 +58,6 @@ static inline void kunmap_coherent(struct page *page) preempt_check_resched(); } -/* - * clear_user_page - * @to: P1 address - * @address: U0 address to be mapped - * @page: page (virt_to_page(to)) - */ -void clear_user_page(void *to, unsigned long address, struct page *page) -{ - clear_page(to); - - if (pages_do_alias((unsigned long)to, address & PAGE_MASK)) - __flush_wback_region(to, PAGE_SIZE); -} - void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) @@ -128,3 +114,16 @@ void copy_user_highpage(struct page *to, struct page *from, smp_wmb(); } EXPORT_SYMBOL(copy_user_highpage); + +void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *kaddr = kmap_atomic(page, KM_USER0); + + clear_page(kaddr); + + if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) + __flush_wback_region(kaddr, PAGE_SIZE); + + kunmap_atomic(kaddr, KM_USER0); +} +EXPORT_SYMBOL(clear_user_highpage); diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c deleted file mode 100644 index 684891b..0000000 --- a/arch/sh/mm/pg-sh7705.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * arch/sh/mm/pg-sh7705.c - * - * Copyright (C) 1999, 2000 Niibe Yutaka - * Copyright (C) 2004 Alex Song - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void __flush_purge_virtual_region(void *p1, void *virt, int size) -{ - unsigned long v; - unsigned long begin, end; - unsigned long p1_begin; - - - begin = L1_CACHE_ALIGN((unsigned long)virt); - end = L1_CACHE_ALIGN((unsigned long)virt + size); - - p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1); - - /* do this the slow way as we may not have TLB entries - * for virt yet. */ - for (v = begin; v < end; v += L1_CACHE_BYTES) { - unsigned long p; - unsigned long ways, addr; - - p = __pa(p1_begin); - - ways = current_cpu_data.dcache.ways; - addr = CACHE_OC_ADDRESS_ARRAY; - - do { - unsigned long data; - - addr |= (v & current_cpu_data.dcache.entry_mask); - - data = ctrl_inl(addr); - if ((data & CACHE_PHYSADDR_MASK) == - (p & CACHE_PHYSADDR_MASK)) { - data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID); - ctrl_outl(data, addr); - } - - addr += current_cpu_data.dcache.way_incr; - } while (--ways); - - p1_begin += L1_CACHE_BYTES; - } -} - -/* - * clear_user_page - * @to: P1 address - * @address: U0 address to be mapped - */ -void clear_user_page(void *to, unsigned long address, struct page *pg) -{ - if (pages_do_alias(address, (unsigned long)to)) - __flush_purge_virtual_region(to, - (void *)(address & 0xfffff000), - PAGE_SIZE); - - clear_page(to); - __flush_wback_region(to, PAGE_SIZE); -} - -/* - * copy_user_page - * @to: P1 address - * @from: P1 address - * @address: U0 address to be mapped - */ -void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) -{ - if (pages_do_alias(address, (unsigned long)to)) - __flush_purge_virtual_region(to, - (void *)(address & 0xfffff000), - PAGE_SIZE); - - copy_page(to, from); - __flush_wback_region(to, PAGE_SIZE); -} -- cgit v1.1 From 221c007b028ebf663ebee4fc90483909547d92a7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 27 Jul 2009 20:55:46 +0900 Subject: sh: Rename arch/sh/lib/clear_page.S -> __clear_user.S. 
Now that this only contains the __clear_user() function, rename it accordingly. Signed-off-by: Paul Mundt --- arch/sh/lib/Makefile | 2 +- arch/sh/lib/__clear_user.S | 108 +++++++++++++++++++++++++++++++++++++++++++++ arch/sh/lib/clear_page.S | 108 --------------------------------------------- 3 files changed, 109 insertions(+), 109 deletions(-) create mode 100644 arch/sh/lib/__clear_user.S delete mode 100644 arch/sh/lib/clear_page.S (limited to 'arch') diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index c2b28d8..a969b47 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -23,7 +23,7 @@ obj-y += io.o memcpy-y := memcpy.o memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o -lib-$(CONFIG_MMU) += copy_page.o clear_page.o +lib-$(CONFIG_MMU) += copy_page.o __clear_user.o lib-$(CONFIG_MCOUNT) += mcount.o lib-y += $(memcpy-y) $(udivsi3-y) diff --git a/arch/sh/lib/__clear_user.S b/arch/sh/lib/__clear_user.S new file mode 100644 index 0000000..bee9817 --- /dev/null +++ b/arch/sh/lib/__clear_user.S @@ -0,0 +1,108 @@ +/* + * __clear_user_page, __clear_user, clear_page implementation of SuperH + * + * Copyright (C) 2001 Kaz Kojima + * Copyright (C) 2001, 2002 Niibe Yutaka + * Copyright (C) 2006 Paul Mundt + */ +#include +#include + +ENTRY(__clear_user) + ! + mov #0, r0 + mov #0xe0, r1 ! 0xffffffe0 + ! + ! r4..(r4+31)&~32 -------- not aligned [ Area 0 ] + ! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ] + ! (r4+r5)&~32..r4+r5 -------- not aligned [ Area 2 ] + ! + ! Clear area 0 + mov r4, r2 + ! + tst r1, r5 ! length < 32 + bt .Larea2 ! skip to remainder + ! + add #31, r2 + and r1, r2 + cmp/eq r4, r2 + bt .Larea1 + mov r2, r3 + sub r4, r3 + mov r3, r7 + mov r4, r2 + ! +.L0: dt r3 +0: mov.b r0, @r2 + bf/s .L0 + add #1, r2 + ! + sub r7, r5 + mov r2, r4 +.Larea1: + mov r4, r3 + add r5, r3 + and r1, r3 + cmp/hi r2, r3 + bf .Larea2 + ! + ! Clear area 1 +#if defined(CONFIG_CPU_SH4) +1: movca.l r0, @r2 +#else +1: mov.l r0, @r2 +#endif + add #4, r2 +2: mov.l r0, @r2 + add #4, r2 +3: mov.l r0, @r2 + add #4, r2 +4: mov.l r0, @r2 + add #4, r2 +5: mov.l r0, @r2 + add #4, r2 +6: mov.l r0, @r2 + add #4, r2 +7: mov.l r0, @r2 + add #4, r2 +8: mov.l r0, @r2 + add #4, r2 + cmp/hi r2, r3 + bt/s 1b + nop + ! + ! Clear area 2 +.Larea2: + mov r4, r3 + add r5, r3 + cmp/hs r3, r2 + bt/s .Ldone + sub r2, r3 +.L2: dt r3 +9: mov.b r0, @r2 + bf/s .L2 + add #1, r2 + ! +.Ldone: rts + mov #0, r0 ! return 0 as normal return + + ! return the number of bytes remained +.Lbad_clear_user: + mov r4, r0 + add r5, r0 + rts + sub r2, r0 + +.section __ex_table,"a" + .align 2 + .long 0b, .Lbad_clear_user + .long 1b, .Lbad_clear_user + .long 2b, .Lbad_clear_user + .long 3b, .Lbad_clear_user + .long 4b, .Lbad_clear_user + .long 5b, .Lbad_clear_user + .long 6b, .Lbad_clear_user + .long 7b, .Lbad_clear_user + .long 8b, .Lbad_clear_user + .long 9b, .Lbad_clear_user +.previous diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S deleted file mode 100644 index bee9817..0000000 --- a/arch/sh/lib/clear_page.S +++ /dev/null @@ -1,108 +0,0 @@ -/* - * __clear_user_page, __clear_user, clear_page implementation of SuperH - * - * Copyright (C) 2001 Kaz Kojima - * Copyright (C) 2001, 2002 Niibe Yutaka - * Copyright (C) 2006 Paul Mundt - */ -#include -#include - -ENTRY(__clear_user) - ! - mov #0, r0 - mov #0xe0, r1 ! 0xffffffe0 - ! - ! r4..(r4+31)&~32 -------- not aligned [ Area 0 ] - ! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ] - ! (r4+r5)&~32..r4+r5 -------- not aligned [ Area 2 ] - ! - ! Clear area 0 - mov r4, r2 - ! 
- tst r1, r5 ! length < 32 - bt .Larea2 ! skip to remainder - ! - add #31, r2 - and r1, r2 - cmp/eq r4, r2 - bt .Larea1 - mov r2, r3 - sub r4, r3 - mov r3, r7 - mov r4, r2 - ! -.L0: dt r3 -0: mov.b r0, @r2 - bf/s .L0 - add #1, r2 - ! - sub r7, r5 - mov r2, r4 -.Larea1: - mov r4, r3 - add r5, r3 - and r1, r3 - cmp/hi r2, r3 - bf .Larea2 - ! - ! Clear area 1 -#if defined(CONFIG_CPU_SH4) -1: movca.l r0, @r2 -#else -1: mov.l r0, @r2 -#endif - add #4, r2 -2: mov.l r0, @r2 - add #4, r2 -3: mov.l r0, @r2 - add #4, r2 -4: mov.l r0, @r2 - add #4, r2 -5: mov.l r0, @r2 - add #4, r2 -6: mov.l r0, @r2 - add #4, r2 -7: mov.l r0, @r2 - add #4, r2 -8: mov.l r0, @r2 - add #4, r2 - cmp/hi r2, r3 - bt/s 1b - nop - ! - ! Clear area 2 -.Larea2: - mov r4, r3 - add r5, r3 - cmp/hs r3, r2 - bt/s .Ldone - sub r2, r3 -.L2: dt r3 -9: mov.b r0, @r2 - bf/s .L2 - add #1, r2 - ! -.Ldone: rts - mov #0, r0 ! return 0 as normal return - - ! return the number of bytes remained -.Lbad_clear_user: - mov r4, r0 - add r5, r0 - rts - sub r2, r0 - -.section __ex_table,"a" - .align 2 - .long 0b, .Lbad_clear_user - .long 1b, .Lbad_clear_user - .long 2b, .Lbad_clear_user - .long 3b, .Lbad_clear_user - .long 4b, .Lbad_clear_user - .long 5b, .Lbad_clear_user - .long 6b, .Lbad_clear_user - .long 7b, .Lbad_clear_user - .long 8b, .Lbad_clear_user - .long 9b, .Lbad_clear_user -.previous -- cgit v1.1 From 0dfae7d5a21901b28ec0452d71be64adf5ea323e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 27 Jul 2009 21:30:17 +0900 Subject: sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms. Now that the SH-4 page clear/copy ops are generic, they can be used for all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it too will gradually be converted over to using this interface. SH-3 platforms which do not contain aliases will see no impact from this change, while aliasing SH-3 platforms will get the same interface as SH-4. 
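(For reference, the aliasing test that both the generic copy and clear paths key off is the pages_do_alias() helper from asm/page.h shown earlier in this series; a condensed sketch, not part of the patch itself:

	static inline int
	pages_do_alias(unsigned long addr1, unsigned long addr2)
	{
		/* do the two addresses pick different cache colours
		 * for the same physical page? */
		return (addr1 ^ addr2) & shm_align_mask;
	}

On parts whose caches have no aliases this check never fires, which is why the non-aliasing SH-3 case sees no behavioural change.)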
Signed-off-by: Paul Mundt --- arch/sh/include/asm/cacheflush.h | 15 ----- arch/sh/include/asm/page.h | 11 ++-- arch/sh/include/asm/pgtable.h | 3 +- arch/sh/mm/Makefile_32 | 6 +- arch/sh/mm/cache-sh5.c | 17 +++++ arch/sh/mm/pg-mmu.c | 136 +++++++++++++++++++++++++++++++++++++++ arch/sh/mm/pg-sh4.c | 129 ------------------------------------- 7 files changed, 161 insertions(+), 156 deletions(-) create mode 100644 arch/sh/mm/pg-mmu.c delete mode 100644 arch/sh/mm/pg-sh4.c (limited to 'arch') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4e36011..4c85d55 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page) flush_dcache_page(page); } -#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF) extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); @@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma, extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); -#else -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ - } while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - } while (0) -#endif #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 5208b7b..847eeab 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from); struct page; struct vm_area_struct; -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ - (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \ - defined(CONFIG_SH7705_CACHE_32KB)) +#if defined(CONFIG_CPU_SH5) extern void clear_user_page(void *to, unsigned long address, struct page *page); extern void copy_user_page(void *to, void *from, unsigned long address, struct page *page); -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) + +#elif defined(CONFIG_MMU) extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE extern void clear_user_highpage(struct page *page, unsigned long vaddr); #define clear_user_highpage clear_user_highpage -#endif + #else + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + #endif /* diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index bef3ab7..ba23332 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -141,8 +141,7 @@ extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd); -#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ - defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU) +#if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5) extern void kmap_coherent_init(void); #else #define kmap_coherent_init() do { } while (0) diff --git a/arch/sh/mm/Makefile_32 
b/arch/sh/mm/Makefile_32 index 5c04bbb..62e2807 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -15,7 +15,7 @@ endif obj-y += $(cache-y) mmu-y := tlb-nommu.o pg-nommu.o -mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o +mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o obj-y += $(mmu-y) obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o @@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o obj-y += $(tlb-y) -ifndef CONFIG_CACHE_OFF -obj-$(CONFIG_CPU_SH4) += pg-sh4.o -obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o -endif endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index 8676209..3e2d732 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page) else sh64_clear_user_page_coloured(to, address); } + +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, const void *src, + unsigned long len) +{ + flush_cache_page(vma, vaddr, page_to_pfn(page)); + memcpy(dst, src, len); + flush_icache_user_range(vma, page, vaddr, len); +} + +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, const void *src, + unsigned long len) +{ + flush_cache_page(vma, vaddr, page_to_pfn(page)); + memcpy(dst, src, len); +} #endif diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c new file mode 100644 index 0000000..356d2cd --- /dev/null +++ b/arch/sh/mm/pg-mmu.c @@ -0,0 +1,136 @@ +/* + * arch/sh/mm/pg-mmu.c + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2002 - 2009 Paul Mundt + * + * Released under the terms of the GNU GPL v2.0. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) + +static pte_t *kmap_coherent_pte; + +void __init kmap_coherent_init(void) +{ +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) + unsigned long vaddr; + + /* cache the first coherent kmap pte */ + vaddr = __fix_to_virt(FIX_CMAP_BEGIN); + kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); +#endif +} + +static inline void *kmap_coherent(struct page *page, unsigned long addr) +{ + enum fixed_addresses idx; + unsigned long vaddr, flags; + pte_t pte; + + inc_preempt_count(); + + idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; + vaddr = __fix_to_virt(FIX_CMAP_END - idx); + pte = mk_pte(page, PAGE_KERNEL); + + local_irq_save(flags); + flush_tlb_one(get_asid(), vaddr); + local_irq_restore(flags); + + update_mmu_cache(NULL, vaddr, pte); + + set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); + + return (void *)vaddr; +} + +static inline void kunmap_coherent(struct page *page) +{ + dec_preempt_count(); + preempt_check_resched(); +} + +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, const void *src, + unsigned long len) +{ + if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && + !test_bit(PG_dcache_dirty, &page->flags)) { + void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); + memcpy(vto, src, len); + kunmap_coherent(vto); + } else { + memcpy(dst, src, len); + if (boot_cpu_data.dcache.n_aliases) + set_bit(PG_dcache_dirty, &page->flags); + } + + if (vma->vm_flags & VM_EXEC) + flush_cache_page(vma, vaddr, page_to_pfn(page)); +} + +void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, const void *src, + unsigned long len) +{ + if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && + !test_bit(PG_dcache_dirty, &page->flags)) { + void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); + memcpy(dst, vfrom, len); + kunmap_coherent(vfrom); + } else { + memcpy(dst, src, len); + if (boot_cpu_data.dcache.n_aliases) + set_bit(PG_dcache_dirty, &page->flags); + } +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + void *vfrom, *vto; + + vto = kmap_atomic(to, KM_USER1); + + if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && + !test_bit(PG_dcache_dirty, &from->flags)) { + vfrom = kmap_coherent(from, vaddr); + copy_page(vto, vfrom); + kunmap_coherent(vfrom); + } else { + vfrom = kmap_atomic(from, KM_USER0); + copy_page(vto, vfrom); + kunmap_atomic(vfrom, KM_USER0); + } + + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) + __flush_wback_region(vto, PAGE_SIZE); + + kunmap_atomic(vto, KM_USER1); + /* Make sure this page is cleared on other CPU's too before using it */ + smp_wmb(); +} +EXPORT_SYMBOL(copy_user_highpage); + +void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *kaddr = kmap_atomic(page, KM_USER0); + + clear_page(kaddr); + + if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) + __flush_wback_region(kaddr, PAGE_SIZE); + + kunmap_atomic(kaddr, KM_USER0); +} +EXPORT_SYMBOL(clear_user_highpage); diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c deleted file mode 100644 index 4d93070..0000000 --- a/arch/sh/mm/pg-sh4.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * arch/sh/mm/pg-sh4.c - * - * Copyright (C) 1999, 
2000, 2002 Niibe Yutaka - * Copyright (C) 2002 - 2009 Paul Mundt - * - * Released under the terms of the GNU GPL v2.0. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#define kmap_get_fixmap_pte(vaddr) \ - pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) - -static pte_t *kmap_coherent_pte; - -void __init kmap_coherent_init(void) -{ - unsigned long vaddr; - - /* cache the first coherent kmap pte */ - vaddr = __fix_to_virt(FIX_CMAP_BEGIN); - kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); -} - -static inline void *kmap_coherent(struct page *page, unsigned long addr) -{ - enum fixed_addresses idx; - unsigned long vaddr, flags; - pte_t pte; - - inc_preempt_count(); - - idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; - vaddr = __fix_to_virt(FIX_CMAP_END - idx); - pte = mk_pte(page, PAGE_KERNEL); - - local_irq_save(flags); - flush_tlb_one(get_asid(), vaddr); - local_irq_restore(flags); - - update_mmu_cache(NULL, vaddr, pte); - - set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); - - return (void *)vaddr; -} - -static inline void kunmap_coherent(struct page *page) -{ - dec_preempt_count(); - preempt_check_resched(); -} - -void copy_to_user_page(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { - void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); - memcpy(vto, src, len); - kunmap_coherent(vto); - } else { - memcpy(dst, src, len); - set_bit(PG_dcache_dirty, &page->flags); - } - - if (vma->vm_flags & VM_EXEC) - flush_cache_page(vma, vaddr, page_to_pfn(page)); -} - -void copy_from_user_page(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { - void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); - memcpy(dst, vfrom, len); - kunmap_coherent(vfrom); - } else { - memcpy(dst, src, len); - set_bit(PG_dcache_dirty, &page->flags); - } -} - -void copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma) -{ - void *vfrom, *vto; - - vto = kmap_atomic(to, KM_USER1); - - if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) { - vfrom = kmap_coherent(from, vaddr); - copy_page(vto, vfrom); - kunmap_coherent(vfrom); - } else { - vfrom = kmap_atomic(from, KM_USER0); - copy_page(vto, vfrom); - kunmap_atomic(vfrom, KM_USER0); - } - - if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) - __flush_wback_region(vto, PAGE_SIZE); - - kunmap_atomic(vto, KM_USER1); - /* Make sure this page is cleared on other CPU's too before using it */ - smp_wmb(); -} -EXPORT_SYMBOL(copy_user_highpage); - -void clear_user_highpage(struct page *page, unsigned long vaddr) -{ - void *kaddr = kmap_atomic(page, KM_USER0); - - clear_page(kaddr); - - if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) - __flush_wback_region(kaddr, PAGE_SIZE); - - kunmap_atomic(kaddr, KM_USER0); -} -EXPORT_SYMBOL(clear_user_highpage); -- cgit v1.1 From 9cef7492696a416663b4edb953a4eade8517ebeb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 29 Jul 2009 00:12:17 +0900 Subject: sh: update_mmu_cache() consolidation. This splits out a separate __update_cache()/__update_tlb() for update_mmu_cache() to wrap in to. 
This lets us share the common __update_cache() bits while keeping special __update_tlb() handling broken out. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 15 +++++++++++++-- arch/sh/mm/pg-mmu.c | 21 +++++++++++++++++++++ arch/sh/mm/tlb-nommu.c | 9 ++++++--- arch/sh/mm/tlb-pteaex.c | 13 ++++++------- arch/sh/mm/tlb-sh3.c | 29 ++++++----------------------- arch/sh/mm/tlb-sh4.c | 29 ++++++----------------------- arch/sh/mm/tlbflush_64.c | 25 +++++++++---------------- 7 files changed, 67 insertions(+), 74 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index ba23332..43ef3e9 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -134,8 +134,19 @@ typedef pte_t *pte_addr_t; #define pgtable_cache_init() do { } while (0) struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); + +extern void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte); +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t pte); + +static inline void +update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ + __update_cache(vma, address, pte); + __update_tlb(vma, address, pte); +} + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c index 356d2cd..8602f68 100644 --- a/arch/sh/mm/pg-mmu.c +++ b/arch/sh/mm/pg-mmu.c @@ -134,3 +134,24 @@ void clear_user_highpage(struct page *page, unsigned long vaddr) kunmap_atomic(kaddr, KM_USER0); } EXPORT_SYMBOL(clear_user_highpage); + +void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ + struct page *page; + unsigned long pfn = pte_pfn(pte); + + if (!boot_cpu_data.dcache.n_aliases) + return; + + page = pfn_to_page(pfn); + if (pfn_valid(pfn) && page_mapping(page)) { + int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); + if (dirty) { + unsigned long addr = (unsigned long)page_address(page); + + if (pages_do_alias(addr, address & PAGE_MASK)) + __flush_wback_region((void *)addr, PAGE_SIZE); + } + } +} diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c index 71c742b..0ef5429 100644 --- a/arch/sh/mm/tlb-nommu.c +++ b/arch/sh/mm/tlb-nommu.c @@ -46,10 +46,13 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) BUG(); } -void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ +} + +void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) { - BUG(); } void __init page_table_range_init(unsigned long start, unsigned long end, diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index c39b773..9aabd31 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c @@ -16,15 +16,14 @@ #include #include -void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { - unsigned long flags; - unsigned long pteval; - unsigned long vpn; + unsigned long flags, pteval, vpn; - /* Ptrace may call this routine. */ - if (vma && current->active_mm != vma->vm_mm) + /* + * Handle debugger faulting in for debugee. 
+ */ + if (current->active_mm != vma->vm_mm) return; local_irq_save(flags); diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 9b8459c..425f1f2 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c @@ -27,32 +27,16 @@ #include #include -void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { - unsigned long flags; - unsigned long pteval; - unsigned long vpn; - unsigned long pfn = pte_pfn(pte); - struct page *page; + unsigned long flags, pteval, vpn; - /* Ptrace may call this routine. */ - if (vma && current->active_mm != vma->vm_mm) + /* + * Handle debugger faulting in for debugee. + */ + if (current->active_mm != vma->vm_mm) return; - page = pfn_to_page(pfn); - if (pfn_valid(pfn) && page_mapping(page)) { -#if defined(CONFIG_SH7705_CACHE_32KB) - int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); - if (dirty) { - unsigned long addr = (unsigned long)page_address(page); - - if (pages_do_alias(addr, address & PAGE_MASK)) - __flush_wback_region((void *)addr, PAGE_SIZE); - } -#endif - } - local_irq_save(flags); /* Set PTEH register */ @@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) for (i = 0; i < ways; i++) ctrl_outl(data, addr + (i << 8)); } - diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index cf50082..81199f1 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c @@ -15,33 +15,16 @@ #include #include -void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { - unsigned long flags; - unsigned long pteval; - unsigned long vpn; - unsigned long pfn = pte_pfn(pte); - struct page *page; + unsigned long flags, pteval, vpn; - /* Ptrace may call this routine. */ - if (vma && current->active_mm != vma->vm_mm) + /* + * Handle debugger faulting in for debugee. + */ + if (current->active_mm != vma->vm_mm) return; - page = pfn_to_page(pfn); - if (pfn_valid(pfn) && page_mapping(page)) { -#ifndef CONFIG_SMP - int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); - if (dirty) { - - unsigned long addr = (unsigned long)page_address(page); - - if (pages_do_alias(addr, address & PAGE_MASK)) - __flush_wback_region((void *)addr, PAGE_SIZE); - } -#endif - } - local_irq_save(flags); /* Set PTEH register */ diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 3ce40ea..f2e44e9 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -329,22 +329,6 @@ do_sigbus: goto no_context; } -void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) -{ - /* - * This appears to get called once for every pte entry that gets - * established => I don't think it's efficient to try refilling the - * TLBs with the pages - some may not get accessed even. Also, for - * executable pages, it is impossible to determine reliably here which - * TLB they should be mapped into (or both even). - * - * So, just do nothing here and handle faults on demand. In the - * TLBMISS handling case, the refill is now done anyway after the pte - * has been fixed up, so that deals with most useful cases. - */ -} - void local_flush_tlb_one(unsigned long asid, unsigned long page) { unsigned long long match, pteh=0, lpage; @@ -482,3 +466,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) /* FIXME: Optimize this later.. 
*/ flush_tlb_all(); } + +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ +} + +void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ +} -- cgit v1.1 From 3ed6e129390fb872c3b7e05a232e5d380fbdfb48 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 29 Jul 2009 22:06:58 +0900 Subject: sh: Handle a NULL vma in __update_tlb() for the fast-path. The TLB miss fast-path presently calls in to update_mmu_cache() to set up the entry, and does so with a NULL vma. Check for vma validity in the __update_tlb() ptrace checks. Signed-off-by: Paul Mundt --- arch/sh/mm/tlb-pteaex.c | 2 +- arch/sh/mm/tlb-sh3.c | 2 +- arch/sh/mm/tlb-sh4.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 9aabd31..409b7c2 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c @@ -23,7 +23,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) /* * Handle debugger faulting in for debugee. */ - if (current->active_mm != vma->vm_mm) + if (vma && current->active_mm != vma->vm_mm) return; local_irq_save(flags); diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 425f1f2..ace8e6d 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c @@ -34,7 +34,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) /* * Handle debugger faulting in for debugee. */ - if (current->active_mm != vma->vm_mm) + if (vma && current->active_mm != vma->vm_mm) return; local_irq_save(flags); diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index 81199f1..7d3c63e 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c @@ -22,7 +22,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) /* * Handle debugger faulting in for debugee. */ - if (current->active_mm != vma->vm_mm) + if (vma && current->active_mm != vma->vm_mm) return; local_irq_save(flags); -- cgit v1.1 From 82b242214b6f5b96eb9b76452ac6e2b67dd81abd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 29 Jul 2009 22:43:58 +0900 Subject: Revert "sh: Bump the earlytimer bits back to time_init()." This reverts commit 1d29ebebcb951ab6b04d22807cafb24b893310a2. Bumping up the earlytimer initialization causes IRQs to be enabled too early, which blows up lockdep: ... NR_IRQS:256 nr_irqs:256 ------------[ cut here ]------------ Badness at kernel/lockdep.c:2128 Pid : 0, Comm: swapper CPU : 0 Not tainted (2.6.31-rc3-00205-g3ed6e12-dirty #2443) PC is at trace_hardirqs_on_caller+0x48/0x10c PR is at trace_hardirqs_on_caller+0x3c/0x10c ... Revert it back to late_time_init time, which fixes up lockdep. Signed-off-by: Paul Mundt --- arch/sh/kernel/time.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index d2424b0..7f95f47 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -92,6 +92,21 @@ module_init(rtc_generic_init); void (*board_time_init)(void); +static void __init sh_late_time_init(void) +{ + /* + * Make sure all compiled-in early timers register themselves. + * + * Run probe() for two "earlytimer" devices, these will be the + * clockevents and clocksource devices respectively. In the event + * that only a clockevents device is available, we -ENODEV on the + * clocksource and the jiffies clocksource is used transparently + * instead. No error handling is necessary here. 
+ */ + early_platform_driver_register_all("earlytimer"); + early_platform_driver_probe("earlytimer", 2, 0); +} + void __init time_init(void) { if (board_time_init) @@ -108,15 +123,5 @@ void __init time_init(void) local_timer_setup(smp_processor_id()); #endif - /* - * Make sure all compiled-in early timers register themselves. - * - * Run probe() for two "earlytimer" devices, these will be the - * clockevents and clocksource devices respectively. In the event - * that only a clockevents device is available, we -ENODEV on the - * clocksource and the jiffies clocksource is used transparently - * instead. No error handling is necessary here. - */ - early_platform_driver_register_all("earlytimer"); - early_platform_driver_probe("earlytimer", 2, 0); + late_time_init = sh_late_time_init; } -- cgit v1.1 From fd78a76aefb5bf28a11d6960d29e03a11db62320 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Wed, 29 Jul 2009 23:01:24 +0900 Subject: sh: Rework irqflags tracing to fix up CONFIG_PROVE_LOCKING. This cleans up the irqflags tracing code quite a bit and ties it in to various missing callsites that caused an imbalance when CONFIG_PROVE_LOCKING was enabled. Previously this was catching on: 987 #ifdef CONFIG_PROVE_LOCKING 988 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 989 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 990 #endif 991 retval = -EAGAIN; with hardirqs being doubly enabled, and subsequently bailing out with the following call trace: Call trace: [<88035224>] __lock_acquire+0x616/0x6a6 [<88015a8c>] do_fork+0xf8/0x2b0 [<880331ec>] trace_hardirqs_on_caller+0xd4/0x114 [<88241074>] _spin_unlock_irq+0x20/0x64 [<88035224>] __lock_acquire+0x616/0x6a6 [<8800386c>] kernel_thread+0x48/0x70 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88003894>] kernel_thread_helper+0x0/0x14 [<88024bac>] __call_usermodehelper+0x38/0x70 [<88025dc0>] worker_thread+0x150/0x274 [<88035b9c>] lock_release+0x0/0x198 [<88024b74>] __call_usermodehelper+0x0/0x70 [<88028cf0>] autoremove_wake_function+0x0/0x30 [<88028bf2>] kthread+0x3e/0x70 [<88025c70>] worker_thread+0x0/0x274 [<8800389c>] kernel_thread_helper+0x8/0x14 [<88028bb4>] kthread+0x0/0x70 [<88003894>] kernel_thread_helper+0x0/0x14 Reported-by: Nobuhiro Iwamatsu Signed-off-by: Stuart Menefy Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 2 +- arch/sh/include/asm/entry-macros.S | 72 ++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/entry-common.S | 63 +++++++++------------------------ arch/sh/kernel/io_trapped.c | 7 ++-- 4 files changed, 93 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index b440fd9..a6dce41 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -38,7 +38,7 @@ config EARLY_SCIF_CONSOLE_PORT default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \ CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \ CPU_SUBTYPE_SH7343 - default "0xffea0000" if CPU_SUBTYPE_SH7785 + default "0xffeb0000" if CPU_SUBTYPE_SH7785 default "0xffeb0000" if CPU_SUBTYPE_SH7786 default "0xfffe8000" if CPU_SUBTYPE_SH7203 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263 diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 3a4752a..1bdd938 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -31,6 +31,78 @@ #endif .endm +#ifdef CONFIG_TRACE_IRQFLAGS + + .macro TRACE_IRQS_ON + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l 
r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_on +7835: + .endm + .macro TRACE_IRQS_OFF + + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_off +7835: + .endm + +#else + .macro TRACE_IRQS_ON + .endm + + .macro TRACE_IRQS_OFF + .endm +#endif + #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) # define PREF(x) pref @x #else diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d621756..fc26ccd 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -45,7 +45,7 @@ */ #if defined(CONFIG_PREEMPT) -# define preempt_stop() cli +# define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() # define resume_kernel __restore_all @@ -55,11 +55,7 @@ .align 2 ENTRY(exception_error) ! -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 2f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov.l 1f, r0 jmp @r0 @@ -67,22 +63,23 @@ ENTRY(exception_error) .align 2 1: .long do_exception_error -#ifdef CONFIG_TRACE_IRQFLAGS -2: .long trace_hardirqs_on -#endif .align 2 ret_from_exception: preempt_stop() -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 4f, r0 - jsr @r0 - nop -#endif ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 9f + TRACE_IRQS_ON +9: + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 @@ -125,11 +122,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OfF mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all @@ -156,11 +149,7 @@ work_resched: jsr @r1 ! schedule nop cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 @@ -172,10 +161,6 @@ work_resched: 1: .long schedule 2: .long do_notify_resume 3: .long resume_userspace -#ifdef CONFIG_TRACE_IRQFLAGS -4: .long trace_hardirqs_on -5: .long trace_hardirqs_off -#endif .align 2 syscall_exit_work: @@ -184,11 +169,7 @@ syscall_exit_work: tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov r15, r4 mov.l 8f, r0 ! do_syscall_trace_leave @@ -321,11 +302,7 @@ ENTRY(system_call) bt/s debug_trap ! it's a debug trap.. nop -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r10 - jsr @r10 - nop -#endif + TRACE_IRQS_ON sti ! @@ -355,11 +332,7 @@ syscall_call: ! syscall_exit: cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 6f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags @@ -377,9 +350,5 @@ syscall_exit: #endif 2: .long NR_syscalls 3: .long sys_call_table -#ifdef CONFIG_TRACE_IRQFLAGS -5: .long trace_hardirqs_on -6: .long trace_hardirqs_off -#endif 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 77dfecb..e27a19e 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -112,14 +112,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list, struct trapped_io *tiop; struct resource *res; int k, len; + unsigned long flags; - spin_lock_irq(&trapped_lock); + spin_lock_irqsave(&trapped_lock, flags); list_for_each_entry(tiop, list, list) { voffs = 0; for (k = 0; k < tiop->num_resources; k++) { res = tiop->resource + k; if (res->start == offset) { - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return tiop->virt_base + voffs; } @@ -127,7 +128,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list, voffs += roundup(len, PAGE_SIZE); } } - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return NULL; } EXPORT_SYMBOL_GPL(match_trapped_io_handler); -- cgit v1.1 From 3c928320b2254cb6c8d8a7919a1fcf94ca7cae66 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 29 Jul 2009 15:04:05 +0000 Subject: sh: romImage support V2 This patch contains support for the romImage build target V2. The resulting romImage file should be burned to rom or flash and could be used as small boot loader. Board code should keep their setup code in the file romimage.h located in their mach include directory. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Makefile | 2 +- arch/sh/boot/Makefile | 11 +++++++++-- arch/sh/boot/romimage/Makefile | 19 +++++++++++++++++++ arch/sh/boot/romimage/head.S | 10 ++++++++++ arch/sh/boot/romimage/vmlinux.scr | 6 ++++++ arch/sh/include/mach-common/romimage.h | 1 + 6 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 arch/sh/boot/romimage/Makefile create mode 100644 arch/sh/boot/romimage/head.S create mode 100644 arch/sh/boot/romimage/vmlinux.scr create mode 100644 arch/sh/include/mach-common/romimage.h (limited to 'arch') diff --git a/arch/sh/Makefile b/arch/sh/Makefile index c46b3d5..2a7e73f 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -195,7 +195,7 @@ libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.srec \ - zImage vmlinux.srec + zImage vmlinux.srec romImage PHONY += maketools $(BOOT_TARGETS) FORCE maketools: include/linux/version.h FORCE diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index dd2a852..a131687 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -24,9 +24,9 @@ suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZMA) := lzma -targets := zImage vmlinux.srec uImage uImage.srec uImage.gz uImage.bz2 uImage.lzma +targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz uImage.bz2 uImage.lzma extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma -subdir- := compressed +subdir- := compressed romimage $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) @@ -35,6 +35,13 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ +$(obj)/romImage: $(obj)/romimage/vmlinux FORCE + $(call 
if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +$(obj)/romimage/vmlinux: $(obj)/zImage FORCE + $(Q)$(MAKE) $(build)=$(obj)/romimage $@ + KERNEL_MEMORY := 0x00000000 ifeq ($(CONFIG_PMB_FIXED),y) KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile new file mode 100644 index 0000000..5806eee --- /dev/null +++ b/arch/sh/boot/romimage/Makefile @@ -0,0 +1,19 @@ +# +# linux/arch/sh/boot/romimage/Makefile +# +# create an image suitable for burning to flash from zImage +# + +targets := vmlinux head.o + +OBJECTS = $(obj)/head.o +LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext 0 -e romstart + +$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE + $(call if_changed,ld) + @: + +LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T + +$(obj)/piggy.o: $(obj)/vmlinux.scr arch/sh/boot/zImage FORCE + $(call if_changed,ld) diff --git a/arch/sh/boot/romimage/head.S b/arch/sh/boot/romimage/head.S new file mode 100644 index 0000000..97a087b --- /dev/null +++ b/arch/sh/boot/romimage/head.S @@ -0,0 +1,10 @@ +/* + * linux/arch/sh/boot/romimage/head.S + * + * Board specific setup code, executed before zImage loader + */ + +.text + .global romstart +romstart: +#include diff --git a/arch/sh/boot/romimage/vmlinux.scr b/arch/sh/boot/romimage/vmlinux.scr new file mode 100644 index 0000000..287c08f --- /dev/null +++ b/arch/sh/boot/romimage/vmlinux.scr @@ -0,0 +1,6 @@ +SECTIONS +{ + .text : { + *(.data) + } +} diff --git a/arch/sh/include/mach-common/romimage.h b/arch/sh/include/mach-common/romimage.h new file mode 100644 index 0000000..267e241 --- /dev/null +++ b/arch/sh/include/mach-common/romimage.h @@ -0,0 +1 @@ +/* do nothing here by default */ -- cgit v1.1 From d162300e6c963fe255ea7f73b956a7716e24205d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 29 Jul 2009 15:06:15 +0000 Subject: sh: kfr2r09 romImage support V2 This patch is romImage support for the kfr2r09 board V2. The partner-jet-setup.txt file is converted into assembly code which becomes the first code to execute from the reset vector. The file partner-jet-setup.txt can also be used to setup the hardware using a JTAG debugger so booting from RAM can be done without burning the code to flash. 
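(Roughly how the conversion works, as a sketch: each directive in partner-jet-setup.txt becomes a small blob of SH assembly through the .macro definitions in romimage.h below. A line such as "ED 0xFD000108, 0x40000301" from the sdram setup expands to something like:

	mov.l	1f, r1		! register address
	mov.l	2f, r0		! value to write
	mov.l	r0, @r1		! 32-bit store
	bra	3f
	 nop
	.align	2
1:	.long	0xFD000108
2:	.long	0x40000301
3:

so the whole setup script can run straight from the reset vector with no stack or RAM set up yet.)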
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/mach-kfr2r09/partner-jet-setup.txt | 134 +++++++++++++++++++++ arch/sh/include/mach-kfr2r09/romimage.h | 75 ++++++++++++ 2 files changed, 209 insertions(+) create mode 100644 arch/sh/include/mach-kfr2r09/partner-jet-setup.txt create mode 100644 arch/sh/include/mach-kfr2r09/romimage.h (limited to 'arch') diff --git a/arch/sh/include/mach-kfr2r09/partner-jet-setup.txt b/arch/sh/include/mach-kfr2r09/partner-jet-setup.txt new file mode 100644 index 0000000..9c85088 --- /dev/null +++ b/arch/sh/include/mach-kfr2r09/partner-jet-setup.txt @@ -0,0 +1,134 @@ +LIST "partner-jet-setup.txt - 20090729 Magnus Damm" +LIST "set up enough of the kfr2r09 hardware to boot the kernel" + +LIST "zImage (RAM boot)" +LIST "This script can be used to boot the kernel from RAM via JTAG:" +LIST "> < partner-jet-setup.txt" +LIST "> RD zImage, 0xa8800000" +LIST "> G=0xa8800000" + +LIST "romImage (Flash boot)" +LIST "Use the following command to burn the zImage to flash via JTAG:" +LIST "> RD romImage, 0" + +LIST "--------------------------------" + +LIST "disable watchdog" +EW 0xa4520004, 0xa507 + +LIST "select mode for cs5 + cs6" +ED 0xff800020, 0xa5a50001 +ED 0xfec10000, 0x0000001b + +LIST "setup clocks" +ED 0xa4150004, 0x00000050 +ED 0xa4150000, 0x91053508 +WAIT 1 +ED 0xa4150024, 0x00005000 + +LIST "setup pins" +EB 0xa4050120, 0x00 +EB 0xa4050122, 0x00 +EB 0xa4050124, 0x00 +EB 0xa4050126, 0x00 +EB 0xa4050128, 0xA0 +EB 0xa405012A, 0x10 +EB 0xa405012C, 0x00 +EB 0xa405012E, 0x00 +EB 0xa4050130, 0x00 +EB 0xa4050132, 0x00 +EB 0xa4050134, 0x01 +EB 0xa4050136, 0x40 +EB 0xa4050138, 0x00 +EB 0xa405013A, 0x00 +EB 0xa405013C, 0x00 +EB 0xa405013E, 0x20 +EB 0xa4050160, 0x00 +EB 0xa4050162, 0x40 +EB 0xa4050164, 0x03 +EB 0xa4050166, 0x00 +EB 0xa4050168, 0x00 +EB 0xa405016A, 0x00 +EB 0xa405016C, 0x00 + +EW 0xa405014E, 0x5660 +EW 0xa4050150, 0x0145 +EW 0xa4050152, 0x1550 +EW 0xa4050154, 0x0200 +EW 0xa4050156, 0x0040 + +EW 0xa4050158, 0x0000 +EW 0xa405015a, 0x0000 +EW 0xa405015c, 0x0000 +EW 0xa405015e, 0x0000 + +EW 0xa4050180, 0x0000 +EW 0xa4050182, 0x8002 +EW 0xa4050184, 0x0000 + +EW 0xa405018a, 0x9991 +EW 0xa405018c, 0x8011 +EW 0xa405018e, 0x9550 + +EW 0xa4050100, 0x0000 +EW 0xa4050102, 0x5540 +EW 0xa4050104, 0x0000 +EW 0xa4050106, 0x0000 +EW 0xa4050108, 0x4550 +EW 0xa405010a, 0x0130 +EW 0xa405010c, 0x0555 +EW 0xa405010e, 0x0000 +EW 0xa4050110, 0x0000 +EW 0xa4050112, 0xAAA8 +EW 0xa4050114, 0x8305 +EW 0xa4050116, 0x10F0 +EW 0xa4050118, 0x0F50 +EW 0xa405011a, 0x0000 +EW 0xa405011c, 0x0000 +EW 0xa405011e, 0x0555 +EW 0xa4050140, 0x0000 +EW 0xa4050142, 0x5141 +EW 0xa4050144, 0x5005 +EW 0xa4050146, 0xAAA9 +EW 0xa4050148, 0xFAA9 +EW 0xa405014a, 0x3000 +EW 0xa405014c, 0x0000 + +LIST "setup sdram" +ED 0xFD000108, 0x40000301 +ED 0xFD000020, 0x011B0002 +ED 0xFD000030, 0x03060E02 +ED 0xFD000034, 0x01020102 +ED 0xFD000038, 0x01090406 +ED 0xFD000008, 0x00000004 +ED 0xFD000040, 0x00000001 +ED 0xFD000040, 0x00000000 +ED 0xFD000018, 0x00000001 + +WAIT 1 + +ED 0xFD000014, 0x00000002 +ED 0xFD000060, 0x00000032 +ED 0xFD000060, 0x00020000 +ED 0xFD000014, 0x00000004 +ED 0xFD000014, 0x00000004 +ED 0xFD000010, 0x00000001 +ED 0xFD000044, 0x000004AF +ED 0xFD000048, 0x20CF0037 + +LIST "read 16 bytes from sdram" +DD 0xa8000000, 0xa8000000, 1 +DD 0xa8000004, 0xa8000004, 1 +DD 0xa8000008, 0xa8000008, 1 +DD 0xa800000c, 0xa800000c, 1 + +ED 0xFD000014, 0x00000002 +ED 0xFD000014, 0x00000004 +ED 0xFD000108, 0x40000300 +ED 0xFD000040, 0x00010000 + +LIST "write to internal ram" +ED 0xfd8007fc, 0 + 
+LIST "setup cache" +ED 0xff00001c, 0x0000090b diff --git a/arch/sh/include/mach-kfr2r09/romimage.h b/arch/sh/include/mach-kfr2r09/romimage.h new file mode 100644 index 0000000..f5aa8e1 --- /dev/null +++ b/arch/sh/include/mach-kfr2r09/romimage.h @@ -0,0 +1,75 @@ +/* kfr2r09 board specific boot code: + * converts the "partner-jet-script.txt" script into assembly + * the assembly code is the first code to be executed in the romImage + */ + +/* The LIST command is used to include comments in the script */ +.macro LIST comment +.endm + +/* The ED command is used to write a 32-bit word */ +.macro ED, addr, data + mov.l 1f ,r1 + mov.l 2f ,r0 + mov.l r0, @r1 + bra 3f + nop + .align 2 +1: .long \addr +2: .long \data +3: +.endm + +/* The EW command is used to write a 16-bit word */ +.macro EW, addr, data + mov.l 1f ,r1 + mov.l 2f ,r0 + mov.w r0, @r1 + bra 3f + nop + .align 2 +1: .long \addr +2: .long \data +3: +.endm + +/* The EB command is used to write an 8-bit word */ +.macro EB, addr, data + mov.l 1f ,r1 + mov.l 2f ,r0 + mov.b r0, @r1 + bra 3f + nop + .align 2 +1: .long \addr +2: .long \data +3: +.endm + +/* The WAIT command is used to delay the execution */ +.macro WAIT, time + mov.l 2f ,r3 +1: + nop + tst r3, r3 + bf/s 1b + dt r3 + bra 3f + nop + .align 2 +2: .long \time * 100 +3: +.endm + +/* The DD command is used to read a 32-bit word */ +.macro DD, addr, addr2, nr + mov.l 1f ,r1 + mov.l @r1, r0 + bra 2f + nop + .align 2 +1: .long \addr +2: +.endm + +#include "partner-jet-setup.txt" -- cgit v1.1 From fdeb076f20df14b1c7f2817362172548d146b121 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 30 Jul 2009 00:27:35 +0900 Subject: sh: Add romImage target to archhelp. Adds an archhelp blurb for the romImage target so it is reflected in 'make help'. Signed-off-by: Paul Mundt --- arch/sh/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 2a7e73f..b6ff337 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -216,6 +216,7 @@ archclean: define archhelp @echo ' zImage - Compressed kernel image' + @echo ' romImage - Compressed ROM image, if supported' @echo ' vmlinux.srec - Create an ELF S-record' @echo '* uImage - Alias to bootable U-Boot image' @echo ' uImage.srec - Create an S-record for U-Boot' -- cgit v1.1 From 94699b04eddd4b247d871930431d6fa1a46c175e Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Tue, 28 Jul 2009 23:52:54 +0200 Subject: x86, mce: don't log boot MCEs on Pentium M (model == 13) CPUs On my legacy Pentium M laptop (Acer Extensa 2900) I get bogus MCE on a cold boot with CONFIG_X86_NEW_MCE enabled, i.e. (after decoding it with mcelog): MCE 0 HARDWARE ERROR. This is *NOT* a software problem! Please contact your hardware vendor CPU 0 BANK 1 MCG status: MCi status: Error overflow Uncorrected error Error enabled Processor context corrupt MCA: Data CACHE Level-1 UNKNOWN Error STATUS f200000000000195 MCGSTATUS 0 [ The other STATUS values observed: f2000000000001b5 (... UNKNOWN error) and f200000000000115 (... READ Error). To verify that this is not a CONFIG_X86_NEW_MCE bug I also modified the CONFIG_X86_OLD_MCE code (which doesn't log any MCEs) to dump content of STATUS MSR before it is cleared during initialization. ] Since the bogus MCE results in a kernel taint (which in turn disables lockdep support) don't log boot MCEs on Pentium M (model == 13) CPUs by default ("mce=bootlog" boot parameter can be be used to get the old behavior). 
Signed-off-by: Bartlomiej Zolnierkiewicz Reviewed-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 07139a0..7bd19c7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1269,6 +1269,10 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c) if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && monarch_timeout < 0) monarch_timeout = USEC_PER_SEC; + + /* There are also broken BIOSes on some Pentium M systems. */ + if (c->x86 == 6 && c->x86_model == 13 && mce_bootlog < 0) + mce_bootlog = 0; } if (monarch_timeout < 0) monarch_timeout = 0; -- cgit v1.1 From e3346fc48204d780f92527d06df8bf6f28d603ec Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Tue, 28 Jul 2009 23:55:09 +0200 Subject: x86, mce: fix "mce" boot option handling for CONFIG_X86_NEW_MCE "mce argument mce ignored. Please use /sys" message shouldn't be printed when using "mce" boot option. Signed-off-by: Bartlomiej Zolnierkiewicz Reviewed-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7bd19c7..7591944 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1549,8 +1549,10 @@ static struct miscdevice mce_log_device = { */ static int __init mcheck_enable(char *str) { - if (*str == 0) + if (*str == 0) { enable_p5_mce(); + return 1; + } if (*str == '=') str++; if (!strcmp(str, "off")) -- cgit v1.1 From 419d6162c0c0103fa2f44f6691dff9cac14c650d Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Tue, 28 Jul 2009 23:56:00 +0200 Subject: x86, mce: add missing __cpuinit tags mce_cap_init() and mce_cpu_quirks() can be tagged with __cpuinit. Signed-off-by: Bartlomiej Zolnierkiewicz Reviewed-by: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7591944..1ce6db1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1158,7 +1158,7 @@ static int mce_banks_init(void) /* * Initialize Machine Checks for a CPU. */ -static int mce_cap_init(void) +static int __cpuinit mce_cap_init(void) { unsigned b; u64 cap; @@ -1222,7 +1222,7 @@ static void mce_init(void) } /* Add per CPU specific workarounds here */ -static void mce_cpu_quirks(struct cpuinfo_x86 *c) +static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) { /* This should be disabled by the BIOS, but isn't always */ if (c->x86_vendor == X86_VENDOR_AMD) { -- cgit v1.1 From d0c87d1f61704ed589fc0788bedd753632340e98 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Tue, 28 Jul 2009 23:56:37 +0200 Subject: x86, mce: remove never executed code fseverities_coverage is never NULL in err_out code path. Signed-off-by: Bartlomiej Zolnierkiewicz Reviewed-by: Andi Kleen Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce-severity.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index ff0807f..51f7c72 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -209,8 +209,6 @@ static int __init severities_debugfs_init(void) return 0; err_out: - if (fseverities_coverage) - debugfs_remove(fseverities_coverage); if (dmce) debugfs_remove(dmce); return -ENOMEM; -- cgit v1.1 From f3a0867b12e0cf1512c0bd0665f2339fc75ed2a8 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Wed, 29 Jul 2009 00:04:59 +0200 Subject: x86, mce: fix reporting of Thermal Monitoring mechanism enabled Early Pentium M models use different method for enabling TM2 (per paragraph 13.5.2.3 of the "Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A: System Programming Guide, Part 1"). Tested on the affected Pentium M variant (model == 13). Signed-off-by: Bartlomiej Zolnierkiewicz Cc: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/msr-index.h | 4 ++++ arch/x86/kernel/cpu/mcheck/therm_throt.c | 13 ++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 3d1ce09..cbec06d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -222,6 +222,10 @@ #define THERM_STATUS_PROCHOT (1 << 0) +#define MSR_THERM2_CTL 0x0000019d + +#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16) + #define MSR_IA32_MISC_ENABLE 0x000001a0 /* MISC_ENABLE bits: architectural */ diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index bff8dd1..15f2bc0 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -253,9 +253,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) return; } - if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) - tm2 = 1; - /* Check whether a vector already exists */ if (h & APIC_VECTOR_MASK) { printk(KERN_DEBUG @@ -264,6 +261,16 @@ void intel_init_thermal(struct cpuinfo_x86 *c) return; } + /* early Pentium M models use different method for enabling TM2 */ + if (cpu_has(c, X86_FEATURE_TM2)) { + if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { + rdmsr(MSR_THERM2_CTL, l, h); + if (l & MSR_THERM2_CTL_TM_SELECT) + tm2 = 1; + } else if (l & MSR_IA32_MISC_ENABLE_TM2) + tm2 = 1; + } + /* We'll mask the thermal vector in the lapic till we're ready: */ h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED; apic_write(APIC_LVTTHMR, h); -- cgit v1.1 From 6a12235c7d2d75c7d94b9afcaaecd422ff845ce0 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 29 Jul 2009 10:25:58 +0100 Subject: agp: kill phys_to_gart() and gart_to_phys() There seems to be no reason for these -- they're a 1:1 mapping on all platforms. 
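(For context: on every architecture the two macros were plain identity wrappers, e.g.

	#define phys_to_gart(x)	(x)
	#define gart_to_phys(x)	(x)

so AGP code can hand the result of page_to_phys() to the GART directly; removing the wrappers changes nothing functionally.)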
Signed-off-by: David Woodhouse --- arch/alpha/include/asm/agp.h | 4 ---- arch/ia64/include/asm/agp.h | 4 ---- arch/parisc/include/asm/agp.h | 4 ---- arch/powerpc/include/asm/agp.h | 4 ---- arch/sparc/include/asm/agp.h | 4 ---- arch/x86/include/asm/agp.h | 4 ---- 6 files changed, 24 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/agp.h b/arch/alpha/include/asm/agp.h index 26c1791..a94d48b 100644 --- a/arch/alpha/include/asm/agp.h +++ b/arch/alpha/include/asm/agp.h @@ -9,10 +9,6 @@ #define unmap_page_from_agp(page) #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h index c11fdd8..01d09c4 100644 --- a/arch/ia64/include/asm/agp.h +++ b/arch/ia64/include/asm/agp.h @@ -17,10 +17,6 @@ #define unmap_page_from_agp(page) /* nothing */ #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h index 9651660..d226ffa 100644 --- a/arch/parisc/include/asm/agp.h +++ b/arch/parisc/include/asm/agp.h @@ -11,10 +11,6 @@ #define unmap_page_from_agp(page) /* nothing */ #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h index 86455c4..416e12c 100644 --- a/arch/powerpc/include/asm/agp.h +++ b/arch/powerpc/include/asm/agp.h @@ -8,10 +8,6 @@ #define unmap_page_from_agp(page) #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/sparc/include/asm/agp.h b/arch/sparc/include/asm/agp.h index c245687..70f52c1 100644 --- a/arch/sparc/include/asm/agp.h +++ b/arch/sparc/include/asm/agp.h @@ -7,10 +7,6 @@ #define unmap_page_from_agp(page) #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h index 9825cd6..eec2a70 100644 --- a/arch/x86/include/asm/agp.h +++ b/arch/x86/include/asm/agp.h @@ -22,10 +22,6 @@ */ #define flush_agp_cache() wbinvd() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) -- cgit v1.1 From cfc65dd57967f2e0c7b3a8b73e6d12470b1cf1c1 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 30 Jul 2009 16:15:18 -0600 Subject: iommu=pt is a valid early param This avoids a "Malformed early option 'iommu'" warning on boot when trying to use pass-through mode. Signed-off-by: Alex Williamson Signed-off-by: David Woodhouse --- arch/x86/kernel/pci-dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 1a041bc..ae13e34 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -212,10 +212,8 @@ static __init int iommu_setup(char *p) if (!strncmp(p, "soft", 4)) swiotlb = 1; #endif - if (!strncmp(p, "pt", 2)) { + if (!strncmp(p, "pt", 2)) iommu_pass_through = 1; - return 1; - } gart_parse_options(p); -- cgit v1.1 From a42548a18866e87092db93b771e6c5b060d78401 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 29 Jul 2009 12:15:29 +0200 Subject: cputime: Optimize jiffies_to_cputime(1) For powerpc with CONFIG_VIRT_CPU_ACCOUNTING jiffies_to_cputime(1) is not compile time constant and run time calculations are quite expensive. To optimize we use precomputed value. For all other architectures is is preprocessor definition. Signed-off-by: Stanislaw Gruszka Acked-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: Oleg Nesterov Cc: Andrew Morton Cc: Paul Mackerras Cc: Benjamin Herrenschmidt LKML-Reference: <1248862529-6063-5-git-send-email-sgruszka@redhat.com> Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/cputime.h | 1 + arch/powerpc/include/asm/cputime.h | 13 +++++++++++++ arch/powerpc/kernel/time.c | 4 ++++ arch/s390/include/asm/cputime.h | 1 + 4 files changed, 19 insertions(+) (limited to 'arch') diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index d20b998..7fa8a85 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -30,6 +30,7 @@ typedef u64 cputime_t; typedef u64 cputime64_t; #define cputime_zero ((cputime_t)0) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~((cputime_t)0) >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index f42e623..fa19f3f 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -18,6 +18,9 @@ #ifndef CONFIG_VIRT_CPU_ACCOUNTING #include +#ifdef __KERNEL__ +static inline void setup_cputime_one_jiffy(void) { } +#endif #else #include @@ -49,6 +52,11 @@ typedef u64 cputime64_t; #ifdef __KERNEL__ /* + * One jiffy in timebase units computed during initialization + */ +extern cputime_t cputime_one_jiffy; + +/* * Convert cputime <-> jiffies */ extern u64 __cputime_jiffies_factor; @@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) return ct; } +static inline void setup_cputime_one_jiffy(void) +{ + cputime_one_jiffy = jiffies_to_cputime(1); +} + static inline cputime64_t jiffies64_to_cputime64(const u64 jif) { cputime_t ct; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index eae4511..211d7b0 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor); DEFINE_PER_CPU(unsigned long, cputime_last_delta); DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); 
+cputime_t cputime_one_jiffy; + static void calc_cputime_factors(void) { struct div_result res; @@ -500,6 +502,7 @@ static int __init iSeries_tb_recal(void) tb_to_xs = divres.result_low; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; vdso_data->tb_to_xs = tb_to_xs; + setup_cputime_one_jiffy(); } else { printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" @@ -945,6 +948,7 @@ void __init time_init(void) tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); calc_cputime_factors(); + setup_cputime_one_jiffy(); /* * Calculate the length of each tick in ns. It will not be diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 7a3817a..24b1244 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base) #endif /* __s390x__ */ #define cputime_zero (0ULL) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~0UL >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) -- cgit v1.1 From 05aa7882757f68af799140142ec44f83b2df4298 Mon Sep 17 00:00:00 2001 From: Rafael Ignacio Zurita Date: Tue, 4 Aug 2009 14:38:08 +0900 Subject: sh: Add early printk support for SH770x CPUs. This adds early printk support for SH770x (tested on SH7709 based hp6xx). Signed-off-by: Rafael Ignacio Zurita Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 1 + arch/sh/kernel/early_printk.c | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index a6dce41..763b792 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -43,6 +43,7 @@ config EARLY_SCIF_CONSOLE_PORT default "0xfffe8000" if CPU_SUBTYPE_SH7203 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263 default "0xffe80000" if CPU_SH4 + default "0xa4000150" if CPU_SH3 default "0x00000000" config EARLY_PRINTK diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index a952dcf..64f2746 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -134,7 +134,7 @@ static void scif_sercon_init(char *s) sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */ sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */ } -#elif defined(CONFIG_CPU_SH4) +#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) #define DEFAULT_BAUD 115200 /* * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 @@ -220,11 +220,9 @@ static int __init setup_early_printk(char *buf) early_console = &scif_console; #if !defined(CONFIG_SH_STANDARD_BIOS) -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); #endif -#endif } #endif -- cgit v1.1 From 6ba4a8f0f542e791e4158c91a844234b142578dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 06:57:36 +0000 Subject: sh: hwblk support for sh7724 This patch adds hwblk support for the sh7724 processor. 
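Each block is described by its module-stop register, bit and power domain, and the existing MSTP clocks are re-registered on top of that table. A condensed sketch of the pattern used in the hunks below, with values copied from this patch itself (the HWBLK() and SH_HWBLK_CLK() macro signatures are inferred from their use here, not from their definitions):

	/* hwblk-sh7724.c: TMU0 sits behind MSTPCR0 bit 15 in the core power domain */
	[HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),

	/* clock-sh7724.c: the "tmu0" peripheral clock now hangs off that block */
	SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
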
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7724.h | 17 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 119 ++++++++++++++++---------------- arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c | 121 +++++++++++++++++++++++++++++++++ 4 files changed, 201 insertions(+), 58 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c (limited to 'arch') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h index 66fd118..0cd1f71 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7724.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h @@ -266,4 +266,21 @@ enum { GPIO_FN_INTC_IRQ1, GPIO_FN_INTC_IRQ0, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_RSMEM, HWBLK_ILMEM, HWBLK_L2C, + HWBLK_FPU, HWBLK_INTC, HWBLK_DMAC0, HWBLK_SHYWAY, + HWBLK_HUDI, HWBLK_DBG, HWBLK_UBC, + HWBLK_TMU0, HWBLK_CMT, HWBLK_RWDT, HWBLK_DMAC1, HWBLK_TMU1, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SCIF3, + HWBLK_SCIF4, HWBLK_SCIF5, HWBLK_MSIOF0, HWBLK_MSIOF1, + HWBLK_KEYSC, HWBLK_RTC, HWBLK_IIC0, HWBLK_IIC1, + HWBLK_MMC, HWBLK_ETHER, HWBLK_ATAPI, HWBLK_TPU, HWBLK_IRDA, + HWBLK_TSIF, HWBLK_USB1, HWBLK_USB0, HWBLK_2DG, + HWBLK_SDHI0, HWBLK_SDHI1, HWBLK_VEU1, HWBLK_CEU1, HWBLK_BEU1, + HWBLK_2DDMAC, HWBLK_SPU, HWBLK_JPU, HWBLK_VOU, + HWBLK_BEU0, HWBLK_CEU0, HWBLK_VEU0, HWBLK_VPU, HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7724_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 1d7ae38..12cddf4 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -27,7 +27,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o -clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o +clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 5d5c9b9..ba24e38 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7724 registers */ #define FRQCRA 0xa4150000 @@ -156,64 +158,67 @@ struct clk div6_clks[] = { SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) +#define R_CLK (&r_clk) +#define P_CLK (&div4_clks[DIV4_P]) +#define B_CLK (&div4_clks[DIV4_B]) +#define I_CLK (&div4_clks[DIV4_I]) +#define SH_CLK (&div4_clks[DIV4_SH]) static struct clk mstp_clks[] = { - MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), - MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), - MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), - MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0), - MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), - MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0), - MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), - MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0), - MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), - MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), - MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), - 
MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), - MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), - MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), - MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), - MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), - MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), - MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), - MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), - - MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0), - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), - MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0), - - MSTP("mmc0", &div4_clks[DIV4_B], MSTPCR2, 29, 0, 1, 0), - MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0), - MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0), - MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), - MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), - MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), - MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), - MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1), - MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), - MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), - MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1), - MSTP("ceu1", &div4_clks[DIV4_B], MSTPCR2, 13, 0, 1, 1), - MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1), - MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1), - MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0), - MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), + SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0), + SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0), + SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0), + SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0), + 
SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0), + SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0), + SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0), + SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0), + + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0), + SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0), + + SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0), + SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0), + SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0), + SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0), + SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0), + SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0), + SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0), + SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), + SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0), + SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0), + SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0), + SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -236,7 +241,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c new file mode 100644 index 0000000..1613ad6 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c @@ -0,0 +1,121 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c + * + * SH7724 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7724 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7724 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7724_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7724_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA), + [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA), + [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM), + [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA), + [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + + [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA), + [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA), + + [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA), + [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM), + [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM), + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA), + [HWBLK_USB1] = HWBLK(MSTPCR2, 21, CORE_AREA), + [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM), + [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA), + [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM), + [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM), + [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM), + [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM), + [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM), + [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7724_hwblk_info = { + .areas = sh7724_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7724_hwblk_area), + .hwblks = sh7724_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7724_hwblk), +}; + +int 
arch_hwblk_sleep_mode(void) +{ + if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7724_hwblk_info); +} -- cgit v1.1 From 133b170f08d6c20578f25b1ae71f80a5e638ccb6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:01:36 +0000 Subject: sh: clean up MSTPCRn register definitions This patch removes the unused MSTPCRn register definitions from the SuperH Mobile code for sh7722, sh7723 and sh7724. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 3 --- 3 files changed, 9 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 1fa9e1d..5b1bbbe 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index bf64c78..e5c6391 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index ba24e38..34611d9 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -33,9 +33,6 @@ #define FCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define SPUCLKCR 0xa415003c #define FLLFRQ 0xa4150050 #define LSTATS 0xa4150060 -- cgit v1.1 From 4f3243117ad42b4d1faeecd452f7b24306fcfc4a Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 3 Aug 2009 04:52:03 +0000 Subject: sh: ms7724se: add 1280x720 lcdc output support There was no big meaning in the support of SVGA, but 720p support is necessary for ms7724se board. So, this patch support 720p instead of SVGA. 
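The timing values in the hunk below add up to the standard 720p raster: (1280 + 220 + 110 + 40) x (720 + 20 + 5 + 5) = 1650 x 750 clocks per frame, so assuming the usual 60 Hz refresh the required dot clock is 1650 * 750 * 60 = 74,250,000 Hz. That is why the new comment asks for the OSC6 LCDC clock to be moved from 25.175 MHz to 74.25 MHz when SW41 selects 720p.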
Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 4fb7e48..957ed17 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -39,7 +39,15 @@ * SW41 : abxx xxxx -> a = 0 : Analog monitor * 1 : Digital monitor * b = 0 : VGA - * 1 : SVGA + * 1 : 720p + */ + +/* + * about 720p + * + * When you use 1280 x 720 lcdc output, + * you should change OSC6 lcdc clock from 25.175MHz to 74.25MHz, + * and change SW41 to use 720p */ /* Heartbeat */ @@ -546,15 +554,15 @@ static int __init devices_setup(void) sh_eth_init(); if (sw & SW41_B) { - /* SVGA */ - lcdc_info.ch[0].lcd_cfg.xres = 800; - lcdc_info.ch[0].lcd_cfg.yres = 600; - lcdc_info.ch[0].lcd_cfg.left_margin = 142; - lcdc_info.ch[0].lcd_cfg.right_margin = 52; - lcdc_info.ch[0].lcd_cfg.hsync_len = 96; - lcdc_info.ch[0].lcd_cfg.upper_margin = 24; - lcdc_info.ch[0].lcd_cfg.lower_margin = 2; - lcdc_info.ch[0].lcd_cfg.vsync_len = 2; + /* 720p */ + lcdc_info.ch[0].lcd_cfg.xres = 1280; + lcdc_info.ch[0].lcd_cfg.yres = 720; + lcdc_info.ch[0].lcd_cfg.left_margin = 220; + lcdc_info.ch[0].lcd_cfg.right_margin = 110; + lcdc_info.ch[0].lcd_cfg.hsync_len = 40; + lcdc_info.ch[0].lcd_cfg.upper_margin = 20; + lcdc_info.ch[0].lcd_cfg.lower_margin = 5; + lcdc_info.ch[0].lcd_cfg.vsync_len = 5; } else { /* VGA */ lcdc_info.ch[0].lcd_cfg.xres = 640; -- cgit v1.1 From edc67b29425b12312356fe9a92352ce6b7307c68 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 3 Aug 2009 04:52:24 +0000 Subject: sh: Add SH7724 DMAC support. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-sh.h | 1 + arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 0c8f8e1..68a5f4c 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -16,6 +16,7 @@ /* DMAOR contorl: The DMAOR access size is different by CPU.*/ #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7724) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) #define dmaor_read_reg(n) \ diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h index 0ed5178..f0886bc 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h @@ -16,7 +16,8 @@ #define DMAE0_IRQ 38 #define SH_DMAC_BASE0 0xFF608020 #define SH_DMARS_BASE 0xFF609000 -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) +#elif defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7724) #define DMTE0_IRQ 48 /* DMAC0A*/ #define DMTE4_IRQ 40 /* DMAC0B */ #define DMTE6_IRQ 42 -- cgit v1.1 From 9aaa74908b2117bcd560ab3ea8a9d6a491a77c28 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:17 +0000 Subject: sh: Runtime PM pdev hwblk These patches extend struct platform device data for a bunch of SuperH Mobile processors and embedded boards. The patches simply add hardware block ids to on-chip platform devices. Platform devices off chip (such as external ethernet controllers or flash chips) are left out which gives them a special case hardware block id of zero. Upcoming Runtime PM code will make use of the hardware block id to group devices together. 
The hardware block id can also be used to extend the SuperH Mobile clock framework implementation. This series of patches depend on the following: "Driver Core: Add platform device arch data V3". This patch adds a hwblk_id member to struct pdev_archdata. This member should be used to point out on-chip hardware block id. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/device.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/include/asm/device.h b/arch/sh/include/asm/device.h index 8688a88..783ecdc 100644 --- a/arch/sh/include/asm/device.h +++ b/arch/sh/include/asm/device.h @@ -3,7 +3,9 @@ * * This file is released under the GPLv2 */ -#include + +struct dev_archdata { +}; struct platform_device; /* allocate contiguous memory chunk and fill in struct resource */ @@ -12,3 +14,6 @@ int platform_resource_setup_memory(struct platform_device *pdev, void plat_early_device_setup(void); +struct pdev_archdata { + int hwblk_id; +}; -- cgit v1.1 From f69d5782742e57d5f047ed13440bcf1b320cb074 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:27 +0000 Subject: sh: Runtime PM pdev hwblk - sh7722 Add hwblk_id to on-chip sh7722 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 67b0d87..3509775 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -17,6 +17,7 @@ #include #include #include +#include static struct resource rtc_resources[] = { [0] = { @@ -46,6 +47,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; static struct m66592_platdata usbf_platdata = { @@ -76,6 +80,9 @@ static struct platform_device usbf_device = { }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, + .archdata = { + .hwblk_id = HWBLK_USBF, + }, }; static struct resource iic_resources[] = { @@ -97,6 +104,9 @@ static struct platform_device iic_device = { .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, + .archdata = { + .hwblk_id = HWBLK_IIC, + }, }; static struct uio_info vpu_platform_data = { @@ -125,6 +135,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; static struct uio_info veu_platform_data = { @@ -153,6 +166,9 @@ static struct platform_device veu_device = { }, .resource = veu_resources, .num_resources = ARRAY_SIZE(veu_resources), + .archdata = { + .hwblk_id = HWBLK_VEU, + }, }; static struct uio_info jpu_platform_data = { @@ -181,6 +197,9 @@ static struct platform_device jpu_device = { }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), + .archdata = { + .hwblk_id = HWBLK_JPU, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -213,6 +232,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -244,6 +266,9 @@ static struct platform_device tmu0_device = { }, .resource = tmu0_resources, .num_resources = 
ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -275,6 +300,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -305,6 +333,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct plat_sci_port sci_platform_data[] = { -- cgit v1.1 From 2de63cf376585508f6942aaa7337848f8c4cdd7d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:35 +0000 Subject: sh: Runtime PM pdev hwblk - Migo-R Add hwblk_id to Migo-R board specific on-chip sh7722 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-migor/setup.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index f70f4644..a508a0f 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -98,6 +98,9 @@ static struct platform_device sh_keysc_device = { .dev = { .platform_data = &sh_keysc_info, }, + .archdata = { + .hwblk_id = HWBLK_KEYSC, + }, }; static struct mtd_partition migor_nor_flash_partitions[] = @@ -292,6 +295,9 @@ static struct platform_device migor_lcdc_device = { .dev = { .platform_data = &sh_mobile_lcdc_info, }, + .archdata = { + .hwblk_id = HWBLK_LCDC, + }, }; static struct clk *camera_clk; @@ -379,6 +385,9 @@ static struct platform_device migor_ceu_device = { .dev = { .platform_data = &sh_mobile_ceu_info, }, + .archdata = { + .hwblk_id = HWBLK_CEU, + }, }; struct spi_gpio_platform_data sdcard_cn9_platform_data = { -- cgit v1.1 From 66d9c51ac434615b0f0e864c0c9a81d7264ca3ef Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:44 +0000 Subject: sh: Runtime PM pdev hwblk - Solution Engine 7722 Add hwblk_id to Solution Engine 7722 board specific on-chip sh7722 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7722/setup.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c index af84904..3637407 100644 --- a/arch/sh/boards/mach-se/7722/setup.c +++ b/arch/sh/boards/mach-se/7722/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Heartbeat */ static struct heartbeat_data heartbeat_data = { @@ -137,6 +138,9 @@ static struct platform_device sh_keysc_device = { .dev = { .platform_data = &sh_keysc_info, }, + .archdata = { + .hwblk_id = HWBLK_KEYSC, + }, }; static struct platform_device *se7722_devices[] __initdata = { -- cgit v1.1 From 09d21f9c83c49afd04850b8701772c09954054e0 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:53 +0000 Subject: sh: Runtime PM pdev hwblk - sh7723 Add hwblk_id to on-chip sh7723 platform devices. 
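The change is mechanical and repeats one pattern for every on-chip device; a condensed sketch of a single entry as it looks after the hunks below are applied (the surrounding fields are elided and example_device is an illustrative name):

	static struct platform_device example_device = {
		/* .name, .id, .resource, ... unchanged */
		.archdata = {
			/* on-chip block this device belongs to; 0 means "no block" (off-chip) */
			.hwblk_id = HWBLK_RTC,
		},
	};
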
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 26dc4d3..4caa5a7 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct uio_info vpu_platform_data = { .name = "VPU5", @@ -45,6 +46,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; static struct uio_info veu0_platform_data = { @@ -73,6 +77,9 @@ static struct platform_device veu0_device = { }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), + .archdata = { + .hwblk_id = HWBLK_VEU2H0, + }, }; static struct uio_info veu1_platform_data = { @@ -101,6 +108,9 @@ static struct platform_device veu1_device = { }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), + .archdata = { + .hwblk_id = HWBLK_VEU2H1, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -133,6 +143,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -164,6 +177,9 @@ static struct platform_device tmu0_device = { }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -195,6 +211,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -225,6 +244,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu3_platform_data = { @@ -255,6 +277,9 @@ static struct platform_device tmu3_device = { }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu4_platform_data = { @@ -285,6 +310,9 @@ static struct platform_device tmu4_device = { }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu5_platform_data = { @@ -315,6 +343,9 @@ static struct platform_device tmu5_device = { }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct plat_sci_port sci_platform_data[] = { @@ -395,6 +426,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; static struct r8a66597_platdata r8a66597_data = { @@ -424,6 +458,9 @@ static struct platform_device sh7723_usb_host_device = { }, .num_resources = ARRAY_SIZE(sh7723_usb_host_resources), .resource = sh7723_usb_host_resources, + .archdata = { + .hwblk_id = HWBLK_USB, + }, }; static struct resource iic_resources[] = { @@ -445,6 +482,9 @@ static struct platform_device iic_device = { .id = 0, /* "i2c0" clock */ 
.num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, + .archdata = { + .hwblk_id = HWBLK_IIC, + }, }; static struct platform_device *sh7723_devices[] __initdata = { -- cgit v1.1 From d3a6f6260a3fecd9a8e301fcf37d87ee70edca12 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:48:02 +0000 Subject: sh: Runtime PM pdev hwblk - AP325RXA Add hwblk_id to AP325RXA board specific on-chip sh7723 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/board-ap325rxa.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 7ffd1b43..07a5474 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -227,6 +227,9 @@ static struct platform_device lcdc_device = { .dev = { .platform_data = &lcdc_info, }, + .archdata = { + .hwblk_id = HWBLK_LCDC, + }, }; static void camera_power(int val) @@ -377,6 +380,9 @@ static struct platform_device ceu_device = { .dev = { .platform_data = &sh_mobile_ceu_info, }, + .archdata = { + .hwblk_id = HWBLK_CEU, + }, }; struct spi_gpio_platform_data sdcard_cn3_platform_data = { -- cgit v1.1 From 593a0c898ac2f09f001d536f699966ec4bc1d25f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:48:11 +0000 Subject: sh: Runtime PM pdev hwblk - sh7724 Add hwblk_id to on-chip sh7724 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 43 ++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index a04edaa..f3851fd 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Serial */ static struct plat_sci_port sci_platform_data[] = { @@ -103,6 +104,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; /* I2C0 */ @@ -125,6 +129,9 @@ static struct platform_device iic0_device = { .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic0_resources), .resource = iic0_resources, + .archdata = { + .hwblk_id = HWBLK_IIC0, + }, }; /* I2C1 */ @@ -147,6 +154,9 @@ static struct platform_device iic1_device = { .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, + .archdata = { + .hwblk_id = HWBLK_IIC1, + }, }; /* VPU */ @@ -176,6 +186,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; /* VEU0 */ @@ -205,6 +218,9 @@ static struct platform_device veu0_device = { }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), + .archdata = { + .hwblk_id = HWBLK_VEU0, + }, }; /* VEU1 */ @@ -234,6 +250,9 @@ static struct platform_device veu1_device = { }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), + .archdata = { + .hwblk_id = HWBLK_VEU1, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -266,6 +285,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -297,6 +319,9 @@ static struct platform_device 
tmu0_device = { }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -328,6 +353,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -358,6 +386,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; @@ -389,6 +420,9 @@ static struct platform_device tmu3_device = { }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu4_platform_data = { @@ -419,6 +453,9 @@ static struct platform_device tmu4_device = { }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu5_platform_data = { @@ -449,6 +486,9 @@ static struct platform_device tmu5_device = { }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; /* JPU */ @@ -478,6 +518,9 @@ static struct platform_device jpu_device = { }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), + .archdata = { + .hwblk_id = HWBLK_JPU, + }, }; static struct platform_device *sh7724_devices[] __initdata = { -- cgit v1.1 From 442c37534c6a46d8ed27144fb09818833edbd049 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:48:21 +0000 Subject: sh: Runtime PM pdev hwblk - kfr2r09 Add hwblk_id to kfr2r09 board specific on-chip sh7724 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-kfr2r09/setup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index bf5f8f8..0e9b390 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -95,6 +95,9 @@ static struct platform_device kfr2r09_sh_keysc_device = { .dev = { .platform_data = &kfr2r09_sh_keysc_info, }, + .archdata = { + .hwblk_id = HWBLK_KEYSC, + }, }; static struct platform_device *kfr2r09_devices[] __initdata = { -- cgit v1.1 From df47cd096c8f54a5242e3a2ffb4525c804567eda Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:48:29 +0000 Subject: sh: Runtime PM pdev hwblk - Solution Engine 7724 Add hwblk_id to Solution Engine 7724 board specific on-chip sh7724 platform devices. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 4fb7e48..8de5ebc 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -174,6 +174,9 @@ static struct platform_device lcdc_device = { .dev = { .platform_data = &lcdc_info, }, + .archdata = { + .hwblk_id = HWBLK_LCDC, + }, }; /* CEU0 */ @@ -205,6 +208,9 @@ static struct platform_device ceu0_device = { .dev = { .platform_data = &sh_mobile_ceu0_info, }, + .archdata = { + .hwblk_id = HWBLK_CEU0, + }, }; /* CEU1 */ @@ -236,6 +242,9 @@ static struct platform_device ceu1_device = { .dev = { .platform_data = &sh_mobile_ceu1_info, }, + .archdata = { + .hwblk_id = HWBLK_CEU1, + }, }; /* KEYSC */ @@ -273,6 +282,9 @@ static struct platform_device keysc_device = { .dev = { .platform_data = &keysc_info, }, + .archdata = { + .hwblk_id = HWBLK_KEYSC, + }, }; /* SH Eth */ @@ -301,6 +313,9 @@ static struct platform_device sh_eth_device = { }, .num_resources = ARRAY_SIZE(sh_eth_resources), .resource = sh_eth_resources, + .archdata = { + .hwblk_id = HWBLK_ETHER, + }, }; static struct r8a66597_platdata sh7724_usb0_host_data = { @@ -330,6 +345,9 @@ static struct platform_device sh7724_usb0_host_device = { }, .num_resources = ARRAY_SIZE(sh7724_usb0_host_resources), .resource = sh7724_usb0_host_resources, + .archdata = { + .hwblk_id = HWBLK_USB0, + }, }; static struct platform_device *ms7724se_devices[] __initdata = { -- cgit v1.1 From 11d82905e0159c07fe2d1bfe5e7d80e4cea333ce Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 15:54:33 +0900 Subject: sh: Fix up early printk build error. Missing endif in the early printk case, fix it up.. Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 64f2746..81a4614 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -223,6 +223,7 @@ static int __init setup_early_printk(char *buf) #if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); #endif +#endif } #endif -- cgit v1.1 From 700487c158163f14e6ff10de770b565c1c993c69 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 15:57:44 +0900 Subject: sh: Add a PG_dcache_dirty sanity check in kmap_coherent(). This plugs in a BUG_ON() in kmap_coherent() for PG_dcache_dirty pages to catch when things go horribly wrong. Copied from the MIPS implementation. Signed-off-by: Paul Mundt --- arch/sh/mm/pg-mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c index 8602f68..3235120 100644 --- a/arch/sh/mm/pg-mmu.c +++ b/arch/sh/mm/pg-mmu.c @@ -37,6 +37,8 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr) unsigned long vaddr, flags; pte_t pte; + BUG_ON(test_bit(PG_dcache_dirty, &page->flags)); + inc_preempt_count(); idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; -- cgit v1.1 From 222db3e5f21fca563f5f692e000afcc01cb4395c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 15:59:15 +0900 Subject: sh: Bring kmap_coherent() out-of-line. kmap_coherent() has gotten too big to leave as an inline, so we bring it out-of-line. 
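For context, every caller pairs it with kunmap_coherent(); the pattern visible in copy_to_user_page() further below reads roughly (as it looks once the following cleanups are applied):

	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent();

so moving the mapping setup out of line costs one extra call per such short sequence while shrinking every call site.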
Signed-off-by: Paul Mundt --- arch/sh/mm/pg-mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c index 3235120..e5c5122 100644 --- a/arch/sh/mm/pg-mmu.c +++ b/arch/sh/mm/pg-mmu.c @@ -31,7 +31,7 @@ void __init kmap_coherent_init(void) #endif } -static inline void *kmap_coherent(struct page *page, unsigned long addr) +static void *kmap_coherent(struct page *page, unsigned long addr) { enum fixed_addresses idx; unsigned long vaddr, flags; -- cgit v1.1 From b5eb10ae901fa797c19accb684825f0e36ecbe0f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 16:00:36 +0900 Subject: sh: Drop unused arguments for kunmap_coherent(). kunmap_coherent() doesn't do anything with its arguments, so just kill them off. Signed-off-by: Paul Mundt --- arch/sh/mm/pg-mmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c index e5c5122..a9ede7b 100644 --- a/arch/sh/mm/pg-mmu.c +++ b/arch/sh/mm/pg-mmu.c @@ -56,7 +56,7 @@ static void *kmap_coherent(struct page *page, unsigned long addr) return (void *)vaddr; } -static inline void kunmap_coherent(struct page *page) +static inline void kunmap_coherent(void) { dec_preempt_count(); preempt_check_resched(); @@ -70,7 +70,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, !test_bit(PG_dcache_dirty, &page->flags)) { void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(vto, src, len); - kunmap_coherent(vto); + kunmap_coherent(); } else { memcpy(dst, src, len); if (boot_cpu_data.dcache.n_aliases) @@ -89,7 +89,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, !test_bit(PG_dcache_dirty, &page->flags)) { void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(dst, vfrom, len); - kunmap_coherent(vfrom); + kunmap_coherent(); } else { memcpy(dst, src, len); if (boot_cpu_data.dcache.n_aliases) @@ -108,7 +108,7 @@ void copy_user_highpage(struct page *to, struct page *from, !test_bit(PG_dcache_dirty, &from->flags)) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); - kunmap_coherent(vfrom); + kunmap_coherent(); } else { vfrom = kmap_atomic(from, KM_USER0); copy_page(vto, vfrom); -- cgit v1.1 From c0fe478dbb14fd32e71d1383dbe302b54ce94134 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 16:02:43 +0900 Subject: sh: Provide __flush_anon_page(). This provides a __flush_anon_page() that handles both the aliasing and non-aliasing cases. This fixes up some crashes with heavy get_user_pages() users. 
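The hook is consumed by the generic mm code rather than by anything in this patch; the get_user_pages() path of this era calls it roughly like this (a sketch, not part of the diff below):

	if (pages) {
		pages[i] = page;
		flush_anon_page(vma, page, start);
		flush_dcache_page(page);
	}

which is where the unflushed anonymous-page aliases were biting before ARCH_HAS_FLUSH_ANON_PAGE was provided here.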
Signed-off-by: Paul Mundt --- arch/sh/include/asm/cacheflush.h | 14 ++++++++++++++ arch/sh/mm/pg-mmu.c | 17 +++++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4c85d55..5dffbd1 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -1,6 +1,8 @@ #ifndef __ASM_SH_CACHEFLUSH_H #define __ASM_SH_CACHEFLUSH_H +#include + #ifdef __KERNEL__ #ifdef CONFIG_CACHE_OFF @@ -43,6 +45,18 @@ extern void __flush_purge_region(void *start, int size); extern void __flush_invalidate_region(void *start, int size); #endif +#ifdef CONFIG_MMU +#define ARCH_HAS_FLUSH_ANON_PAGE +extern void __flush_anon_page(struct page *page, unsigned long); + +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) + __flush_anon_page(page, vmaddr); +} +#endif + #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE static inline void flush_kernel_dcache_page(struct page *page) { diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c index a9ede7b..027c4d8 100644 --- a/arch/sh/mm/pg-mmu.c +++ b/arch/sh/mm/pg-mmu.c @@ -157,3 +157,20 @@ void __update_cache(struct vm_area_struct *vma, } } } + +void __flush_anon_page(struct page *page, unsigned long vmaddr) +{ + unsigned long addr = (unsigned long) page_address(page); + + if (pages_do_alias(addr, vmaddr)) { + if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && + !test_bit(PG_dcache_dirty, &page->flags)) { + void *kaddr; + + kaddr = kmap_coherent(page, vmaddr); + __flush_wback_region((void *)kaddr, PAGE_SIZE); + kunmap_coherent(); + } else + __flush_wback_region((void *)addr, PAGE_SIZE); + } +} -- cgit v1.1 From c7914834ef3b8a396b7e82ea34ac07cdcfe6f868 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 17:14:39 +0900 Subject: sh: Tidy up NEFF-based sign extension for SH-5. This consolidates all of the NEFF-based sign extension for SH-5. In the future the other SH code will need to make use of this as well, so make it generic in preparation for more 32/64 consolidation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 6 ++++++ arch/sh/kernel/process_64.c | 24 ++++++++++-------------- arch/sh/kernel/signal_64.c | 38 +++++++++++++------------------------- arch/sh/mm/fault_64.c | 11 +---------- arch/sh/mm/tlb-sh5.c | 21 +++++---------------- arch/sh/mm/tlbflush_64.c | 2 +- 6 files changed, 36 insertions(+), 66 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 43ef3e9..3cd7127 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -36,6 +36,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define NEFF_SIGN (1LL << (NEFF - 1)) #define NEFF_MASK (-1LL << NEFF) +static inline unsigned long long neff_sign_extend(unsigned long val) +{ + unsigned long long extended = val; + return (extended & NEFF_SIGN) ? 
(extended | NEFF_MASK) : extended; +} + #ifdef CONFIG_29BIT #define NPHYS 29 #else diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 24de742..1192398 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -425,7 +425,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; - unsigned long long se; /* Sign extension */ #ifdef CONFIG_SH_FPU if(last_task_used_math == current) { @@ -441,11 +440,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, *childregs = *regs; + /* + * Sign extend the edited stack. + * Note that thread.pc and thread.pc will stay + * 32-bit wide and context switch must take care + * of NEFF sign extension. + */ if (user_mode(regs)) { - childregs->regs[15] = usp; + childregs->regs[15] = neff_sign_extend(usp); p->thread.uregs = childregs; } else { - childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE; + childregs->regs[15] = + neff_sign_extend((unsigned long)task_stack_page(p) + + THREAD_SIZE); } childregs->regs[9] = 0; /* Set return value for child */ @@ -454,17 +461,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.sp = (unsigned long) childregs; p->thread.pc = (unsigned long) ret_from_fork; - /* - * Sign extend the edited stack. - * Note that thread.pc and thread.pc will stay - * 32-bit wide and context switch must take care - * of NEFF sign extension. - */ - - se = childregs->regs[15]; - se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se; - childregs->regs[15] = se; - return 0; } diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0663a0e..026fd1c 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -561,13 +561,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; - /* * On SH5 all edited pointers are subject to NEFF */ - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. @@ -580,9 +578,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, * . being code, linker turns ShMedia bit on, always * dereference index -1. */ - DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) @@ -596,9 +593,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, * Set up registers for signal handler. * All edited pointers are subject to NEFF. */ - regs->regs[REG_SP] = (unsigned long) frame; - regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? 
- (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ /* FIXME: @@ -613,8 +608,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); @@ -676,13 +670,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; - /* * On SH5 all edited pointers are subject to NEFF */ - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. @@ -695,15 +687,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * . being code, linker turns ShMedia bit on, always * dereference index -1. */ - - DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) goto give_sigsegv; + /* Cohere the trampoline with the I-cache. */ flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); } @@ -711,14 +702,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * All edited pointers are subject to NEFF. */ - regs->regs[REG_SP] = (unsigned long) frame; - regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? - (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c index bd63b96..2b356ce 100644 --- a/arch/sh/mm/fault_64.c +++ b/arch/sh/mm/fault_64.c @@ -56,16 +56,7 @@ inline void __do_tlb_refill(unsigned long address, /* * Set PTEH register */ - pteh = address & MMU_VPN_MASK; - - /* Sign extend based on neff. */ -#if (NEFF == 32) - /* Faster sign extension */ - pteh = (unsigned long long)(signed long long)(signed long)pteh; -#else - /* General case */ - pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; -#endif + pteh = neff_sign_extend(address & MMU_VPN_MASK); /* Set the ASID. 
*/ pteh |= get_asid() << PTEH_ASID_SHIFT; diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index dae1312..fdb64e4 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c @@ -117,26 +117,15 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry) * Load up a virtual<->physical translation for @eaddr<->@paddr in the * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). */ -inline void sh64_setup_tlb_slot(unsigned long long config_addr, - unsigned long eaddr, - unsigned long asid, - unsigned long paddr) +void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, + unsigned long asid, unsigned long paddr) { unsigned long long pteh, ptel; - /* Sign extension */ -#if (NEFF == 32) - pteh = (unsigned long long)(signed long long)(signed long) eaddr; -#else -#error "Can't sign extend more than 32 bits yet" -#endif + pteh = neff_sign_extend(eaddr); pteh &= PAGE_MASK; pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; -#if (NEFF == 32) - ptel = (unsigned long long)(signed long long)(signed long) paddr; -#else -#error "Can't sign extend more than 32 bits yet" -#endif + ptel = neff_sign_extend(paddr); ptel &= PAGE_MASK; ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); @@ -152,5 +141,5 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr, * * Teardown any existing mapping in the TLB slot @config_addr. */ -inline void sh64_teardown_tlb_slot(unsigned long long config_addr) +void sh64_teardown_tlb_slot(unsigned long long config_addr) __attribute__ ((alias("__flush_tlb_slot"))); diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index f2e44e9..fa5a95a 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -337,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) /* * Sign-extend based on neff. */ - lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; + lpage = neff_sign_extend(page); match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; match |= lpage; -- cgit v1.1 From d14d751ff9234595639a16e53b3cf0c575946bde Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 17:17:00 +0900 Subject: sh64: Kill off special clear_page() implementation. This can use the now generic clear_page() implementation, which is backed by the sh64 optimized memset routine. This also fixes up the case where PAGE_SIZE != 4kB. 
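For reference, the generic fallback being switched to is little more than a memset over one page; a minimal sketch of the idea (close to, but not necessarily the exact, asm-generic wording) looks like this:

#include <linux/string.h>	/* memset() */
#include <asm/page.h>		/* PAGE_SIZE */

/* Sketch of the generic clear_page(): zero a whole page using the
 * architecture's optimized memset.  Because PAGE_SIZE is taken from the
 * kernel configuration rather than hard-coded, the PAGE_SIZE != 4kB
 * case is handled for free. */
#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
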
Signed-off-by: Paul Mundt --- arch/sh/kernel/sh_ksyms_64.c | 1 - arch/sh/lib64/Makefile | 2 +- arch/sh/lib64/clear_page.S | 54 -------------------------------------------- 3 files changed, 1 insertion(+), 56 deletions(-) delete mode 100644 arch/sh/lib64/clear_page.S (limited to 'arch') diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index f5bd156..f96c95c 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(__get_user_asm_l); EXPORT_SYMBOL(__get_user_asm_q); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile index 334bb2d..1fee75a 100644 --- a/arch/sh/lib64/Makefile +++ b/arch/sh/lib64/Makefile @@ -11,7 +11,7 @@ # Panic should really be compiled as PIC lib-y := udelay.o dbg.o panic.o memcpy.o memset.o \ - copy_user_memcpy.o copy_page.o clear_page.o strcpy.o strlen.o + copy_user_memcpy.o copy_page.o strcpy.o strlen.o # Extracted from libgcc lib-y += udivsi3.o udivdi3.o sdivsi3.o diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S deleted file mode 100644 index 007ab48..0000000 --- a/arch/sh/lib64/clear_page.S +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright 2003 Richard Curnow, SuperH (UK) Ltd. - - This file is subject to the terms and conditions of the GNU General Public - License. See the file "COPYING" in the main directory of this archive - for more details. - - Tight version of memset for the case of just clearing a page. It turns out - that having the alloco's spaced out slightly due to the increment/branch - pair causes them to contend less for access to the cache. Similarly, - keeping the stores apart from the allocos causes less contention. => Do two - separate loops. Do multiple stores per loop to amortise the - increment/branch cost a little. - - Parameters: - r2 : source effective address (start of page) - - Always clears 4096 bytes. - - Note : alloco guarded by synco to avoid TAKum03020 erratum - -*/ - - .section .text..SHmedia32,"ax" - .little - - .balign 8 - .global clear_page -clear_page: - pta/l 1f, tr1 - pta/l 2f, tr2 - ptabs/l r18, tr0 - - movi 4096, r7 - add r2, r7, r7 - add r2, r63, r6 -1: - alloco r6, 0 - synco ! TAKum03020 - addi r6, 32, r6 - bgt/l r7, r6, tr1 - - add r2, r63, r6 -2: - st.q r6, 0, r63 - st.q r6, 8, r63 - st.q r6, 16, r63 - st.q r6, 24, r63 - addi r6, 32, r6 - bgt/l r7, r6, tr2 - - blink tr0, r63 - - -- cgit v1.1 From 817425275271f2514f0dc6952182aa057ce80973 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 18:06:01 +0900 Subject: sh: Split out SH-4 __flush_xxx_region() ops. This splits out the SH-4 __flush_xxx_region() functions and defines them as weak symbols. This allows us to provide optimized versions without having to ifdef cache-sh4.c to death. 
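With the weak-symbol pattern, a CPU-specific file only has to provide a strong definition with the same signature and the linker will pick it over the generic one; a hypothetical override (the body here is illustrative only) would look like:

/* Hypothetical SoC-specific file: a non-weak definition of the same
 * function silently replaces the __weak default from flush-sh4.c at
 * link time -- no #ifdef is needed in the generic code. */
void __flush_wback_region(void *start, int size)
{
	/* ... optimized write-back for this CPU's cache ... */
}
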
Signed-off-by: Paul Mundt --- arch/sh/mm/Makefile_32 | 2 +- arch/sh/mm/cache-sh4.c | 60 ----------------------------------------------- arch/sh/mm/flush-sh4.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 61 deletions(-) create mode 100644 arch/sh/mm/flush-sh4.c (limited to 'arch') diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 62e2807..17b0252 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -8,7 +8,7 @@ ifndef CONFIG_CACHE_OFF cache-$(CONFIG_CPU_SH2) := cache-sh2.o cache-$(CONFIG_CPU_SH2A) := cache-sh2a.o cache-$(CONFIG_CPU_SH3) := cache-sh3.o -cache-$(CONFIG_CPU_SH4) := cache-sh4.o +cache-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o cache-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o endif diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index c3a09b2..dfc1d03 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -119,66 +119,6 @@ void __init p3_cache_init(void) } /* - * Write back the dirty D-caches, but not invalidate them. - * - * START: Virtual Address (U0, P1, or P3) - * SIZE: Size of the region. - */ -void __flush_wback_region(void *start, int size) -{ - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbwb %0" - : /* no output */ - : "m" (__m(v))); - } -} - -/* - * Write back the dirty D-caches and invalidate them. - * - * START: Virtual Address (U0, P1, or P3) - * SIZE: Size of the region. - */ -void __flush_purge_region(void *start, int size) -{ - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbp %0" - : /* no output */ - : "m" (__m(v))); - } -} - -/* - * No write back please - */ -void __flush_invalidate_region(void *start, int size) -{ - unsigned long v; - unsigned long begin, end; - - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); - end = ((unsigned long)start + size + L1_CACHE_BYTES-1) - & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbi %0" - : /* no output */ - : "m" (__m(v))); - } -} - -/* * Write back the range of D-cache, and purge the I-cache. * * Called from kernel/module.c:sys_init_module and routine for a.out format, diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c new file mode 100644 index 0000000..e6d918f --- /dev/null +++ b/arch/sh/mm/flush-sh4.c @@ -0,0 +1,63 @@ +#include +#include +#include + +/* + * Write back the dirty D-caches, but not invalidate them. + * + * START: Virtual Address (U0, P1, or P3) + * SIZE: Size of the region. + */ +void __weak __flush_wback_region(void *start, int size) +{ + unsigned long v; + unsigned long begin, end; + + begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + end = ((unsigned long)start + size + L1_CACHE_BYTES-1) + & ~(L1_CACHE_BYTES-1); + for (v = begin; v < end; v+=L1_CACHE_BYTES) { + asm volatile("ocbwb %0" + : /* no output */ + : "m" (__m(v))); + } +} + +/* + * Write back the dirty D-caches and invalidate them. + * + * START: Virtual Address (U0, P1, or P3) + * SIZE: Size of the region. 
+ */ +void __weak __flush_purge_region(void *start, int size) +{ + unsigned long v; + unsigned long begin, end; + + begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + end = ((unsigned long)start + size + L1_CACHE_BYTES-1) + & ~(L1_CACHE_BYTES-1); + for (v = begin; v < end; v+=L1_CACHE_BYTES) { + asm volatile("ocbp %0" + : /* no output */ + : "m" (__m(v))); + } +} + +/* + * No write back please + */ +void __weak __flush_invalidate_region(void *start, int size) +{ + unsigned long v; + unsigned long begin, end; + + begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + end = ((unsigned long)start + size + L1_CACHE_BYTES-1) + & ~(L1_CACHE_BYTES-1); + for (v = begin; v < end; v+=L1_CACHE_BYTES) { + asm volatile("ocbi %0" + : /* no output */ + : "m" (__m(v))); + } +} -- cgit v1.1 From 0837f52463583f76670ab2350e0f1541cb0351f5 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 18:09:54 +0900 Subject: sh: Partially unroll the SH-4 __flush_xxx_region() flushers. This does a bit of unrolling for the SH-4 region flushers. Based on an earlier patch by SUGIOKA Toshinobu. Signed-off-by: Paul Mundt --- arch/sh/mm/flush-sh4.c | 104 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 83 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c index e6d918f..edefc53 100644 --- a/arch/sh/mm/flush-sh4.c +++ b/arch/sh/mm/flush-sh4.c @@ -10,16 +10,37 @@ */ void __weak __flush_wback_region(void *start, int size) { - unsigned long v; - unsigned long begin, end; + unsigned long v, cnt, end; - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + v = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbwb %0" - : /* no output */ - : "m" (__m(v))); + cnt = (end - v) / L1_CACHE_BYTES; + + while (cnt >= 8) { + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt -= 8; + } + + while (cnt) { + asm volatile("ocbwb @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt--; } } @@ -31,16 +52,36 @@ void __weak __flush_wback_region(void *start, int size) */ void __weak __flush_purge_region(void *start, int size) { - unsigned long v; - unsigned long begin, end; + unsigned long v, cnt, end; - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + v = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbp %0" - : /* no output */ - : "m" (__m(v))); + cnt = (end - v) / L1_CACHE_BYTES; + + while (cnt >= 8) { + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += 
L1_CACHE_BYTES; + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt -= 8; + } + while (cnt) { + asm volatile("ocbp @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt--; } } @@ -49,15 +90,36 @@ void __weak __flush_purge_region(void *start, int size) */ void __weak __flush_invalidate_region(void *start, int size) { - unsigned long v; - unsigned long begin, end; + unsigned long v, cnt, end; - begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); + v = (unsigned long)start & ~(L1_CACHE_BYTES-1); end = ((unsigned long)start + size + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); - for (v = begin; v < end; v+=L1_CACHE_BYTES) { - asm volatile("ocbi %0" - : /* no output */ - : "m" (__m(v))); + cnt = (end - v) / L1_CACHE_BYTES; + + while (cnt >= 8) { + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt -= 8; + } + + while (cnt) { + asm volatile("ocbi @%0" : : "r" (v)); + v += L1_CACHE_BYTES; + cnt--; } } -- cgit v1.1 From 19943b0e30b05d42e494ae6fef78156ebc8c637e Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 4 Aug 2009 16:19:20 +0100 Subject: intel-iommu: Unify hardware and software passthrough support This makes the hardware passthrough mode work a lot more like the software version, so that the behaviour of a kernel with 'iommu=pt' is the same whether the hardware supports passthrough or not. In particular: - We use a single si_domain for the pass-through devices. - 32-bit devices can be taken out of the pass-through domain so that they don't have to use swiotlb. - Devices will work again after being removed from a KVM guest. - A potential oops on OOM (in init_context_pass_through()) is fixed. 
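The 32-bit case comes down to a per-device check against the DMA mask; a rough sketch of the idea (the names below are illustrative, not the driver's actual API) is:

/* Illustrative only: whether a device may stay in the shared
 * identity-mapped si_domain.  A device whose DMA mask cannot reach the
 * top of physical memory is taken out of pass-through and given a real
 * IOMMU mapping instead, so it never has to bounce through swiotlb. */
static int fits_passthrough(u64 device_dma_mask, u64 highest_phys_addr)
{
	return device_dma_mask >= highest_phys_addr;
}
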
Signed-off-by: David Woodhouse --- arch/x86/kernel/pci-swiotlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6af96ee..1e66b18 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -71,9 +71,8 @@ void __init pci_swiotlb_init(void) { /* don't initialize swiotlb if iommu=off (no_iommu=1) */ #ifdef CONFIG_X86_64 - if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) || - iommu_pass_through) - swiotlb = 1; + if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)) + swiotlb = 1; #endif if (swiotlb_force) swiotlb = 1; -- cgit v1.1 From c1155e34998bc07937cdf0c9db16b6902633a255 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 4 Aug 2009 16:55:29 +0200 Subject: sh: convert to asm-generic/hardirq.h Signed-off-by: Christoph Hellwig Signed-off-by: Paul Mundt --- arch/sh/include/asm/hardirq.h | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index 715ee23..a5be4af 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -1,16 +1,9 @@ #ifndef __ASM_SH_HARDIRQ_H #define __ASM_SH_HARDIRQ_H -#include -#include - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - extern void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include #endif /* __ASM_SH_HARDIRQ_H */ -- cgit v1.1 From 9f26e659d8caf5820c51b9c695f0a313e636b99c Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 6 Aug 2009 14:51:30 +0000 Subject: sh: kfr2r09 board support - LCDC panel This patch adds support for the WQVGA LCD display used by the KFR2R09 board. The LCD module is a TX07D34VM0AAA made by Hitachi, and this module is made up by a R61517 chip together with a 240x400 pixel display. The screen is attached to the SuperH Mobile LCDC using a 18-bit SYS bus. The register settings used by the SYS panel setup code are based on an out-of-tree driver which apart from duplicating all LCDC driver code and writing to non-existing hardware registers also never was posted for upstream merge. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-kfr2r09/Makefile | 1 + arch/sh/boards/mach-kfr2r09/lcd_wqvga.c | 332 ++++++++++++++++++++++++++++ arch/sh/boards/mach-kfr2r09/setup.c | 94 ++++++++ arch/sh/include/mach-kfr2r09/mach/kfr2r09.h | 21 ++ 4 files changed, 448 insertions(+) create mode 100644 arch/sh/boards/mach-kfr2r09/lcd_wqvga.c create mode 100644 arch/sh/include/mach-kfr2r09/mach/kfr2r09.h (limited to 'arch') diff --git a/arch/sh/boards/mach-kfr2r09/Makefile b/arch/sh/boards/mach-kfr2r09/Makefile index 7703756..5d58678 100644 --- a/arch/sh/boards/mach-kfr2r09/Makefile +++ b/arch/sh/boards/mach-kfr2r09/Makefile @@ -1 +1,2 @@ obj-y := setup.o +obj-$(CONFIG_FB_SH_MOBILE_LCDC) += lcd_wqvga.o diff --git a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c new file mode 100644 index 0000000..8ccb1cc --- /dev/null +++ b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c @@ -0,0 +1,332 @@ +/* + * KFR2R09 LCD panel support + * + * Copyright (C) 2009 Magnus Damm + * + * Register settings based on the out-of-tree t33fb.c driver + * Copyright (C) 2008 Lineo Solutions, Inc. 
+ * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include